Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     David S. Miller <davem@davemloft.net>
           Thu, 12 Apr 2012 23:41:23 +0000 (19:41 -0400)
committer  David S. Miller <davem@davemloft.net>
           Thu, 12 Apr 2012 23:41:23 +0000 (19:41 -0400)
347 files changed:
Documentation/ABI/testing/sysfs-class-net-mesh
Documentation/networking/batman-adv.txt
Documentation/networking/stmmac.txt
MAINTAINERS
arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
crypto/ablkcipher.c
crypto/aead.c
crypto/ahash.c
crypto/blkcipher.c
crypto/crypto_user.c
crypto/pcompress.c
crypto/rng.c
crypto/shash.c
drivers/atm/horizon.c
drivers/hv/ring_buffer.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/netlink.c
drivers/net/can/dev.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/8390/etherh.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
drivers/net/ethernet/brocade/bna/bfi_reg.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dec/tulip/xircom_cb.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/dl2k.h
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/Makefile
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/Kconfig
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.h
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/neterion/vxge/vxge-main.h
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/silan/sc92031.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunhme.h
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/wiznet/Kconfig [new file with mode: 0644]
drivers/net/ethernet/wiznet/Makefile [new file with mode: 0644]
drivers/net/ethernet/wiznet/w5100.c [new file with mode: 0644]
drivers/net/ethernet/wiznet/w5300.c [new file with mode: 0644]
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xscale/Kconfig
drivers/net/ethernet/xscale/Makefile
drivers/net/ethernet/xscale/ixp2000/Kconfig [deleted file]
drivers/net/ethernet/xscale/ixp2000/Makefile [deleted file]
drivers/net/ethernet/xscale/ixp2000/caleb.c [deleted file]
drivers/net/ethernet/xscale/ixp2000/caleb.h [deleted file]
drivers/net/ethernet/xscale/ixp2000/enp2611.c [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixpdev.c [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixpdev.h [deleted file]
drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h [deleted file]
drivers/net/ethernet/xscale/ixp2000/pm3386.c [deleted file]
drivers/net/ethernet/xscale/ixp2000/pm3386.h [deleted file]
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/hippi/rrunner.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/davicom.c
drivers/net/phy/dp83640.c
drivers/net/phy/marvell.c
drivers/net/ppp/pptp.c
drivers/net/team/Kconfig
drivers/net/team/Makefile
drivers/net/team/team.c
drivers/net/team/team_mode_activebackup.c
drivers/net/team/team_mode_loadbalance.c [new file with mode: 0644]
drivers/net/usb/usbnet.c
drivers/net/wireless/ath/ath6kl/testmode.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2100.h
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlwifi/iwl-testmode.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/wl12xx/testmode.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_ixp46x.c
include/linux/dcbnl.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/hyperv.h
include/linux/if_link.h
include/linux/if_team.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/phy.h
include/linux/platform_data/wiznet.h [new file with mode: 0644]
include/linux/ptp_clock_kernel.h
include/linux/stmmac.h
include/net/dcbnl.h
include/net/icmp.h
include/net/ndisc.h
include/net/netlink.h
include/net/xfrm.h
net/8021q/vlan_netlink.c
net/appletalk/ddp.c
net/batman-adv/Kconfig
net/batman-adv/Makefile
net/batman-adv/bat_debugfs.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_sysfs.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c [new file with mode: 0644]
net/batman-adv/bridge_loop_avoidance.h [new file with mode: 0644]
net/batman-adv/hard-interface.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/originator.c
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/caif/chnl_net.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/gen_stats.c
net/core/kmap_skb.h [deleted file]
net/core/neighbour.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/utils.c
net/dcb/dcbnl.c
net/decnet/dn_dev.c
net/decnet/dn_rules.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ipv4/devinet.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/route.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/exthdrs_core.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/route.c
net/ipv6/sit.c
net/l2tp/l2tp_netlink.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_proto_udplite.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netlink/genetlink.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/phonet/pn_netlink.c
net/sched/act_api.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_meta.c
net/sched/ematch.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_choke.c
net/sched/sch_drr.c
net/sched/sch_dsmark.c
net/sched/sch_fifo.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/unix/af_unix.c
net/wireless/nl80211.c
net/wireless/wext-core.c
net/xfrm/xfrm_user.c

index b218e0f8bdb389eaaa6c14c6c071bb5d033289bc..c81fe89c4c46d4f48e7e3bc8954d587bb9e6d69d 100644 (file)
@@ -14,6 +14,15 @@ Description:
                 mesh will be sent using multiple interfaces at the
                 same time (if available).
 
+What:           /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
+Date:           November 2011
+Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Description:
+                Indicates whether the bridge loop avoidance feature
+                is enabled. This feature detects and avoids loops
+                between the mesh and devices bridged with the soft
+                interface <mesh_iface>.
+
 What:           /sys/class/net/<mesh_iface>/mesh/fragmentation
 Date:           October 2010
 Contact:        Andreas Langer <an.langer@gmx.de>
index 221ad0cdf11f2fb729b1fe2bc624b30e53d996be..220a58c2fb1190d42b05a76a039e6254f85074a0 100644 (file)
@@ -67,18 +67,18 @@ To deactivate an interface you have  to  write  "none"  into  its
 All  mesh  wide  settings  can be found in batman's own interface
 folder:
 
-#  ls  /sys/class/net/bat0/mesh/
-# aggregated_ogms   fragmentation gw_sel_class   vis_mode
-# ap_isolation      gw_bandwidth  hop_penalty
-# bonding           gw_mode       orig_interval
+# ls /sys/class/net/bat0/mesh/
+# aggregated_ogms        fragmentation          hop_penalty
+# ap_isolation           gw_bandwidth           log_level
+# bonding                gw_mode                orig_interval
+# bridge_loop_avoidance  gw_sel_class           vis_mode
 
 
 There is a special folder for debugging information:
 
 #  ls /sys/kernel/debug/batman_adv/bat0/
-#  gateways     socket        transtable_global  vis_data
-#  originators  softif_neigh  transtable_local
-
+# bla_claim_table    log                socket             transtable_local
+# gateways           originators        transtable_global  vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
@@ -202,12 +202,13 @@ abled  during run time. Following log_levels are defined:
 1 - Enable messages related to routing / flooding / broadcasting
 2 - Enable messages related to route added / changed / deleted
 4 - Enable messages related to translation table operations
-7 - Enable all messages
+8 - Enable messages related to bridge loop avoidance
+15 - enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
-# echo 2 > /sys/class/net/bat0/mesh/log_level
+# echo 6 > /sys/class/net/bat0/mesh/log_level
 
 will enable debug messages for when routes change.
 
index d0aeeadd264b4aec82774b095fc9efddb369eb94..ab1e8d7004c5238f9d4b30ec0137fd5f3e298226 100644 (file)
@@ -111,11 +111,12 @@ and detailed below as well:
        int phy_addr;
        int interface;
        struct stmmac_mdio_bus_data *mdio_bus_data;
-       int pbl;
+       struct stmmac_dma_cfg *dma_cfg;
        int clk_csr;
        int has_gmac;
        int enh_desc;
        int tx_coe;
+       int rx_coe;
        int bugged_jumbo;
        int pmt;
        int force_sf_dma_mode;
@@ -136,10 +137,12 @@ Where:
  o pbl: the Programmable Burst Length is maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
- o clk_csr: CSR Clock range selection.
+ o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
  o enh_desc: if sets the MAC will use the enhanced descriptor structure.
  o tx_coe: core is able to perform the tx csum in HW.
+ o rx_coe: the supports three check sum offloading engine types:
+          type_1, type_2 (full csum) and no RX coe.
  o bugged_jumbo: some HWs are not able to perform the csum in HW for
                over-sized frames due to limited buffer sizes.
                Setting this flag the csum will be done in SW on
@@ -160,7 +163,7 @@ Where:
  o custom_cfg: this is a custom configuration that can be passed while
              initialising the resources.
 
-The we have:
+For MDIO bus The we have:
 
  struct stmmac_mdio_bus_data {
        int bus_id;
@@ -177,10 +180,28 @@ Where:
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
+
+For DMA engine we have the following internal fields that should be
+tuned according to the HW capabilities.
+
+struct stmmac_dma_cfg {
+       int pbl;
+       int fixed_burst;
+       int burst_len_supported;
+};
+
+Where:
+ o pbl: Programmable Burst Length
+ o fixed_burst: program the DMA to use the fixed burst mode
+ o burst_len: this is the value we put in the register
+             supported values are provided as macros in
+             linux/stmmac.h header file.
+
+---
+
 Below an example how the structures above are using on ST platforms.
 
  static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = {
-       .pbl = 32,
        .has_gmac = 0,
        .enh_desc = 0,
        .fix_mac_speed = stxYYY_ethernet_fix_mac_speed,
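
[Editor's note: the hunk above moves pbl out of plat_stmmacenet_data and into the
new stmmac_dma_cfg object referenced through the dma_cfg pointer. A minimal sketch
of how a board file might look after this change is shown below; the stxYYY_*
names follow the placeholder naming used in the documentation and do not refer to
a real platform.]

 #include <linux/stmmac.h>

 /* DMA tuning now lives in its own object rather than a bare pbl field */
 static struct stmmac_dma_cfg stxYYY_dma_cfg = {
	.pbl         = 32,	/* Programmable Burst Length, as before */
	.fixed_burst = 1,	/* ask the DMA engine for fixed burst mode */
 };

 static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = {
	.has_gmac = 0,
	.enh_desc = 0,
	.dma_cfg  = &stxYYY_dma_cfg,	/* replaces the old .pbl = 32 */
 };
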
index 32671e00800dd6d9c94eebed48e8ec8b870fcc7d..6750036223d5cc91438073344c8ba5d3c3df16cc 100644 (file)
@@ -1431,6 +1431,7 @@ F:        include/linux/backlight.h
 BATMAN ADVANCED
 M:     Marek Lindner <lindner_marek@yahoo.de>
 M:     Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+M:     Antonio Quartulli <ordex@autistici.org>
 L:     b.a.t.m.a.n@lists.open-mesh.org
 W:     http://www.open-mesh.org/
 S:     Maintained
@@ -3519,12 +3520,6 @@ M:       Deepak Saxena <dsaxena@plexity.net>
 S:     Maintained
 F:     drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL IXP2000 ETHERNET DRIVER
-M:     Lennert Buytenhek <kernel@wantstofly.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/xscale/ixp2000/
-
 INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
 M:     Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:     Jesse Brandeburg <jesse.brandeburg@intel.com>
index 292d55ed211329752faab1dd6bfb04d9b820c957..cf03614d250d9355a116d188c3833f2f975bb418 100644 (file)
@@ -75,4 +75,7 @@ struct ixp46x_ts_regs {
 #define TX_SNAPSHOT_LOCKED (1<<0)
 #define RX_SNAPSHOT_LOCKED (1<<1)
 
+/* The ptp_ixp46x module will set this variable */
+extern int ixp46x_phc_index;
+
 #endif
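
[Editor's note: the exported ixp46x_phc_index lets a MAC driver report which PTP
hardware clock it is bound to through the new ethtool get_ts_info operation. The
sketch below is illustrative only; the ixp4xx_get_ts_info name and the exact
capability bits are assumptions, not a quote of the driver code in this merge.]

 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/net_tstamp.h>
 #include <mach/ixp46x_ts.h>

 static int ixp4xx_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
 {
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	/* PHC index published by the ptp_ixp46x module via the new extern */
	info->phc_index = ixp46x_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC);
	return 0;
 }
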
index 8d3a056ebeeaf250d5ec61f4fe0cba990001528c..533de9550a8292faa59f9ad1e68348d00585f3f4 100644 (file)
@@ -397,9 +397,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-               sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+                   sizeof(struct crypto_report_blkcipher), &rblkcipher))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -478,9 +478,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-               sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+                   sizeof(struct crypto_report_blkcipher), &rblkcipher))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
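
[Editor's note: this is the first of many hunks in this merge that convert the
NLA_PUT*() macros, which hide a goto nla_put_failure inside the macro body, into
explicit checks on the nla_put*() return values. A generic before/after sketch of
such a fill function is shown below; 'foo', FOO_ATTR and FOO_ATTR_FLAGS are
made-up names, not taken from any file in this diff.]

 #include <linux/types.h>
 #include <net/netlink.h>

 enum { FOO_ATTR = 1, FOO_ATTR_FLAGS };	/* hypothetical attribute types */

 struct foo {
	u32 flags;
	u32 data[4];
 };

 static int foo_fill_info(struct sk_buff *skb, const struct foo *f)
 {
	/* old style:
	 *   NLA_PUT(skb, FOO_ATTR, sizeof(f->data), f->data);
	 * where the macro itself jumped to nla_put_failure on error */
	if (nla_put(skb, FOO_ATTR, sizeof(f->data), f->data) ||
	    nla_put_u32(skb, FOO_ATTR_FLAGS, f->flags))
		goto nla_put_failure;
	return 0;

 nla_put_failure:
	return -EMSGSIZE;
 }
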
index e4cb35159be43fe365ec569908f84732c5c67042..0b8121ebec07c3c342f1f5b8fb211946021b6f29 100644 (file)
@@ -125,9 +125,9 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
        raead.maxauthsize = aead->maxauthsize;
        raead.ivsize = aead->ivsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
-               sizeof(struct crypto_report_aead), &raead);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+                   sizeof(struct crypto_report_aead), &raead))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -210,9 +210,9 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
        raead.maxauthsize = aead->maxauthsize;
        raead.ivsize = aead->ivsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
-               sizeof(struct crypto_report_aead), &raead);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+                   sizeof(struct crypto_report_aead), &raead))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 33bc9b62e9ae5dde4a977c00293efc2ca2998a71..3887856c2dd68490736eb17bca5d01eae9837aa0 100644 (file)
@@ -409,9 +409,9 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
-               sizeof(struct crypto_report_hash), &rhash);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
+                   sizeof(struct crypto_report_hash), &rhash))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 4dd80c725498a3bb48cd9f4c961baced47f9cc6c..a8d85a1d670e14dd388a1a38c03f25b80f61436e 100644 (file)
@@ -508,9 +508,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-               sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+                   sizeof(struct crypto_report_blkcipher), &rblkcipher))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index f1ea0a0641350cbd0f7628bc3e7edde4fc38972d..5a37eadb4e56da5c922bab5167a295432e3cca3a 100644 (file)
@@ -81,9 +81,9 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
        rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
        rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER,
-               sizeof(struct crypto_report_cipher), &rcipher);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
+                   sizeof(struct crypto_report_cipher), &rcipher))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -96,9 +96,9 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 
        snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
-               sizeof(struct crypto_report_comp), &rcomp);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+                   sizeof(struct crypto_report_comp), &rcomp))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -117,16 +117,16 @@ static int crypto_report_one(struct crypto_alg *alg,
        ualg->cru_flags = alg->cra_flags;
        ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
 
-       NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority);
-
+       if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+               goto nla_put_failure;
        if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
                struct crypto_report_larval rl;
 
                snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
 
-               NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL,
-                       sizeof(struct crypto_report_larval), &rl);
-
+               if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
+                           sizeof(struct crypto_report_larval), &rl))
+                       goto nla_put_failure;
                goto out;
        }
 
index 2e458e5482d0c5708cbab83dddaad11215c28a0e..04e083ff5373528ae4cb19263cb340461827df16 100644 (file)
@@ -55,9 +55,9 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
-               sizeof(struct crypto_report_comp), &rpcomp);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+                   sizeof(struct crypto_report_comp), &rpcomp))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 64f864fa8043740f50bc186e58a2ee37d8a1499a..f3b7894dec00ee5207642f7ff95cbf8ef6aaeb2f 100644 (file)
@@ -69,9 +69,9 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        rrng.seedsize = alg->cra_rng.seedsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG,
-               sizeof(struct crypto_report_rng), &rrng);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
+                   sizeof(struct crypto_report_rng), &rrng))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 21fc12e2378f17340d40f264af6deeab51f214d5..32067f47e6c7bfbeea673d039e7ad5bc2bf90492 100644 (file)
@@ -534,9 +534,9 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = salg->digestsize;
 
-       NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
-               sizeof(struct crypto_report_hash), &rhash);
-
+       if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
+                   sizeof(struct crypto_report_hash), &rhash))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 75fd691cd43ee1307d6e2c28f4b7687a702e6569..7d01c2a7525657f1da1654b4cf40ea04aec00b46 100644 (file)
@@ -2182,7 +2182,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
     default:
       PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
       return -EINVAL;
-      break;
   }
   
   // TX traffic parameters
@@ -2357,7 +2356,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
       default: {
        PRINTD (DBG_QOS, "unsupported TX traffic class");
        return -EINVAL;
-       break;
       }
     }
   }
@@ -2433,7 +2431,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
       default: {
        PRINTD (DBG_QOS, "unsupported RX traffic class");
        return -EINVAL;
-       break;
       }
     }
   }
@@ -2581,7 +2578,6 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
 //       break;
        default:
          return -ENOPROTOOPT;
-         break;
       };
       break;
   }
@@ -2601,7 +2597,6 @@ static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
 //       break;
        default:
          return -ENOPROTOOPT;
-         break;
       };
       break;
   }
index 8af25a097d75ae2a189d5e5aa25c1b226c849dc7..7233c88f01b8366547467215dc7c8492f03daacb 100644 (file)
 #include "hyperv_vmbus.h"
 
 
-/* #defines */
-
-
-/* Amount of space to write to */
-#define BYTES_AVAIL_TO_WRITE(r, w, z) \
-       ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
-
-
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
-                         u32 *read, u32 *write)
-{
-       u32 read_loc, write_loc;
-
-       smp_read_barrier_depends();
-
-       /* Capture the read/write indices before they changed */
-       read_loc = rbi->ring_buffer->read_index;
-       write_loc = rbi->ring_buffer->write_index;
-
-       *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
-       *read = rbi->ring_datasize - *write;
-}
-
 /*
  * hv_get_next_write_location()
  *
index e3e470fecaa99499835b80d53b83dda3b5f50350..59fbd704a1eca60a06ce7ec7e833e73bf86b5fc2 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/inetdevice.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <net/route.h>
 
 #include <net/tcp.h>
 #include <net/ipv6.h>
@@ -1826,7 +1827,10 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
        route->path_rec->reversible = 1;
        route->path_rec->pkey = cpu_to_be16(0xffff);
        route->path_rec->mtu_selector = IB_SA_EQ;
-       route->path_rec->sl = id_priv->tos >> 5;
+       route->path_rec->sl = netdev_get_prio_tc_map(
+                       ndev->priv_flags & IFF_802_1Q_VLAN ?
+                               vlan_dev_real_dev(ndev) : ndev,
+                       rt_tos2priority(id_priv->tos));
 
        route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
        route->path_rec->rate_selector = IB_SA_EQ;
index 396e293703040938b2459a3c53e022d9444a702d..e497dfbee4352c99b8fdafba40ccfa4aff1e367f 100644 (file)
@@ -125,7 +125,8 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
        unsigned char *prev_tail;
 
        prev_tail = skb_tail_pointer(skb);
-       NLA_PUT(skb, type, len, data);
+       if (nla_put(skb, type, len, data))
+               goto nla_put_failure;
        nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
        return 0;
 
index c5fe3a3db8c919fe397e9e6515c9f9eb8dd6df1c..f03d7a481a809902ea03ada0bb4ff6c9f2ca5196 100644 (file)
@@ -687,18 +687,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (priv->do_get_state)
                priv->do_get_state(dev, &state);
-       NLA_PUT_U32(skb, IFLA_CAN_STATE, state);
-       NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm);
-       NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms);
-       NLA_PUT(skb, IFLA_CAN_BITTIMING,
-               sizeof(priv->bittiming), &priv->bittiming);
-       NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
-       if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
-               NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
-       if (priv->bittiming_const)
-               NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
-                       sizeof(*priv->bittiming_const), priv->bittiming_const);
-
+       if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+           nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+           nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+           nla_put(skb, IFLA_CAN_BITTIMING,
+                   sizeof(priv->bittiming), &priv->bittiming) ||
+           nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+           (priv->do_get_berr_counter &&
+            !priv->do_get_berr_counter(dev, &bec) &&
+            nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+           (priv->bittiming_const &&
+            nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+                    sizeof(*priv->bittiming_const), priv->bittiming_const)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -714,9 +715,9 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
 
-       NLA_PUT(skb, IFLA_INFO_XSTATS,
-               sizeof(priv->can_stats), &priv->can_stats);
-
+       if (nla_put(skb, IFLA_INFO_XSTATS,
+                   sizeof(priv->can_stats), &priv->can_stats))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 11476ca95e93978b4e60e9108d3805a8e4797ab0..203ff9dccadb03206a20f96ea1e71437a98be3be 100644 (file)
@@ -501,6 +501,7 @@ static const struct ethtool_ops ax_ethtool_ops = {
        .get_settings           = ax_get_settings,
        .set_settings           = ax_set_settings,
        .get_link               = ethtool_op_get_link,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 #ifdef CONFIG_AX88796_93CX6
index dbefd5658c146265c1c8a646e432be59ab0e4926..8322c54972f3668710c8418c8cf02fb0526cabfe 100644 (file)
@@ -635,6 +635,7 @@ static const struct ethtool_ops etherh_ethtool_ops = {
        .get_settings   = etherh_get_settings,
        .set_settings   = etherh_set_settings,
        .get_drvinfo    = etherh_get_drvinfo,
+       .get_ts_info    = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops etherh_netdev_ops = {
index c63a64cb608546b0a13fcd48842998786261dca9..a11af5cc484477283ccb624df88bf072fc2f8088 100644 (file)
@@ -174,6 +174,7 @@ source "drivers/net/ethernet/tile/Kconfig"
 source "drivers/net/ethernet/toshiba/Kconfig"
 source "drivers/net/ethernet/tundra/Kconfig"
 source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
 
index 9676a5109d94a42d7ed8ee35a9a6ddbe776ecac7..878ad32b93f21c8fd8191c152c721c409bd81d25 100644 (file)
@@ -73,5 +73,6 @@ obj-$(CONFIG_TILE_NET) += tile/
 obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
 obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
 obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
index d896816512ca952f943b9bbe04e27686a0bab7fd..d920a529ba22168b17b124552f7149fdba708c83 100644 (file)
@@ -114,15 +114,6 @@ static int rx_copybreak /* = 0 */;
 #define DMA_BURST_SIZE 128
 #endif
 
-/* Used to pass the media type, etc.
-   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
-   The media type is usually passed in 'options[]'.
-   These variables are deprecated, use ethtool instead. -Ion
-*/
-#define MAX_UNITS 8            /* More are supported, limit only on options */
-static int options[MAX_UNITS] = {0, };
-static int full_duplex[MAX_UNITS] = {0, };
-
 /* Operational parameters that are set at compile time. */
 
 /* The "native" ring sizes are either 256 or 2048.
@@ -192,8 +183,6 @@ module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
 module_param(intr_latency, int, 0);
 module_param(small_frames, int, 0);
-module_param_array(options, int, NULL, 0);
-module_param_array(full_duplex, int, NULL, 0);
 module_param(enable_hw_cksum, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
 MODULE_PARM_DESC(mtu, "MTU (all boards)");
@@ -201,8 +190,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-6)");
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
-MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
 
 /*
@@ -657,10 +644,10 @@ static const struct net_device_ops netdev_ops = {
 static int __devinit starfire_init_one(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
 {
+       struct device *d = &pdev->dev;
        struct netdev_private *np;
-       int i, irq, option, chip_idx = ent->driver_data;
+       int i, irq, chip_idx = ent->driver_data;
        struct net_device *dev;
-       static int card_idx = -1;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
@@ -673,15 +660,13 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
                printk(version);
 #endif
 
-       card_idx++;
-
        if (pci_enable_device (pdev))
                return -EIO;
 
        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
-               printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
+               dev_err(d, "no PCI MEM resources, aborting\n");
                return -ENODEV;
        }
 
@@ -694,14 +679,14 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
        irq = pdev->irq;
 
        if (pci_request_regions (pdev, DRV_NAME)) {
-               printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
+               dev_err(d, "cannot reserve PCI resources, aborting\n");
                goto err_out_free_netdev;
        }
 
        base = ioremap(ioaddr, io_size);
        if (!base) {
-               printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
-                       card_idx, io_size, ioaddr);
+               dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
+                       io_size, ioaddr);
                goto err_out_free_res;
        }
 
@@ -753,9 +738,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
        /* wait a little longer */
        udelay(1000);
 
-       dev->base_addr = (unsigned long)base;
-       dev->irq = irq;
-
        np = netdev_priv(dev);
        np->dev = dev;
        np->base = base;
@@ -772,21 +754,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
 
        drv_flags = netdrv_tbl[chip_idx].drv_flags;
 
-       option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-       if (dev->mem_start)
-               option = dev->mem_start;
-
-       /* The lower four bits are the media type. */
-       if (option & 0x200)
-               np->mii_if.full_duplex = 1;
-
-       if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
-               np->mii_if.full_duplex = 1;
-
-       if (np->mii_if.full_duplex)
-               np->mii_if.force_media = 1;
-       else
-               np->mii_if.force_media = 0;
        np->speed100 = 1;
 
        /* timer resolution is 128 * 0.8us */
@@ -909,13 +876,14 @@ static int netdev_open(struct net_device *dev)
        const __be32 *fw_rx_data, *fw_tx_data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
+       const int irq = np->pci_dev->irq;
        int i, retval;
        size_t tx_size, rx_size;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
 
        /* Do we ever need to reset the chip??? */
 
-       retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+       retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;
 
@@ -924,7 +892,7 @@ static int netdev_open(struct net_device *dev)
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-                      dev->name, dev->irq);
+                      dev->name, irq);
 
        /* Allocate the various queues. */
        if (!np->queue_mem) {
@@ -935,7 +903,7 @@ static int netdev_open(struct net_device *dev)
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == NULL) {
-                       free_irq(dev->irq, dev);
+                       free_irq(irq, dev);
                        return -ENOMEM;
                }
 
@@ -1962,7 +1930,7 @@ static int netdev_close(struct net_device *dev)
                }
        }
 
-       free_irq(dev->irq, dev);
+       free_irq(np->pci_dev->irq, dev);
 
        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
index ab4daeccdf98a58eb4c5d849c77e09dd2a13b6c8..f816426e1085812b6524c037992594e4a539c746 100644 (file)
@@ -548,6 +548,25 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
        return 0;
 }
 
+static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
+       struct ethtool_ts_info *info)
+{
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_HARDWARE |
+               SOF_TIMESTAMPING_RX_HARDWARE |
+               SOF_TIMESTAMPING_SYS_HARDWARE;
+       info->phc_index = -1;
+       info->tx_types =
+               (1 << HWTSTAMP_TX_OFF) |
+               (1 << HWTSTAMP_TX_ON);
+       info->rx_filters =
+               (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+       return 0;
+}
+
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
        .get_settings = bfin_mac_ethtool_getsettings,
        .set_settings = bfin_mac_ethtool_setsettings,
@@ -555,6 +574,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
        .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
        .get_wol = bfin_mac_ethtool_getwol,
        .set_wol = bfin_mac_ethtool_setwol,
+       .get_ts_info = bfin_mac_ethtool_get_ts_info,
 };
 
 /**************************************************************************/
index 1ef0c9275deefee39bdca1df04d7240b6adeba52..ef5b85b9569e3dcfd3d284c0a475d162d278f247 100644 (file)
@@ -2307,8 +2307,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
                                "Unable to allocate MSI interrupt Error: %d\n",
                                err);
                adapter->have_msi = false;
-       } else
-               netdev->irq = pdev->irq;
+       }
 
        if (!adapter->have_msi)
                flags |= IRQF_SHARED;
@@ -2616,7 +2615,6 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
        SET_NETDEV_DEV(netdev, &pdev->dev);
        pci_set_drvdata(pdev, netdev);
 
-       netdev->irq  = pdev->irq;
        netdev->netdev_ops = &atl1c_netdev_ops;
        netdev->watchdog_timeo = AT_TX_WATCHDOG;
        atl1c_set_ethtool_ops(netdev);
@@ -2706,7 +2704,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev, "cannot map device registers\n");
                goto err_ioremap;
        }
-       netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
 
        /* init mii data */
        adapter->mii.dev = netdev;
index 93ff2b231284e6f93ad8bdc0ca957bcdeaa7dfe3..1220e511ced66015c5e01587580303a7c10331d3 100644 (file)
@@ -1883,27 +1883,24 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
        int err = 0;
 
        adapter->have_msi = true;
-       err = pci_enable_msi(adapter->pdev);
+       err = pci_enable_msi(pdev);
        if (err) {
-               netdev_dbg(adapter->netdev,
+               netdev_dbg(netdev,
                           "Unable to allocate MSI interrupt Error: %d\n", err);
                adapter->have_msi = false;
-       } else
-               netdev->irq = pdev->irq;
-
+       }
 
        if (!adapter->have_msi)
                flags |= IRQF_SHARED;
-       err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
-                       netdev->name, netdev);
+       err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
        if (err) {
                netdev_dbg(adapter->netdev,
                           "Unable to allocate interrupt Error: %d\n", err);
                if (adapter->have_msi)
-                       pci_disable_msi(adapter->pdev);
+                       pci_disable_msi(pdev);
                return err;
        }
-       netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+       netdev_dbg(netdev, "atl1e_request_irq OK\n");
        return err;
 }
 
@@ -2233,7 +2230,6 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
        SET_NETDEV_DEV(netdev, &pdev->dev);
        pci_set_drvdata(pdev, netdev);
 
-       netdev->irq  = pdev->irq;
        netdev->netdev_ops = &atl1e_netdev_ops;
 
        netdev->watchdog_timeo = AT_TX_WATCHDOG;
@@ -2319,7 +2315,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
                netdev_err(netdev, "cannot map device registers\n");
                goto err_ioremap;
        }
-       netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
 
        /* init mii data */
        adapter->mii.dev = netdev;
index 8297e2868736382a5301a31d56711a7c3a9ae4b3..36037a6778205bfb9673803ec72e85672c13fba0 100644 (file)
@@ -7976,7 +7976,6 @@ static int __devinit
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
        struct bnx2 *bp;
-       unsigned long mem_len;
        int rc, i, j;
        u32 reg;
        u64 dma_mask, persist_dma_mask;
@@ -8036,13 +8035,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 #endif
        INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
-       dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
-       dev->mem_end = dev->mem_start + mem_len;
-       dev->irq = pdev->irq;
-
-       bp->regview = ioremap_nocache(dev->base_addr, mem_len);
-
+       bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
+                                                        TX_MAX_TSS_RINGS + 1));
        if (!bp->regview) {
                dev_err(&pdev->dev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
@@ -8346,10 +8340,8 @@ err_out_unmap:
                bp->flags &= ~BNX2_FLAG_AER_ENABLED;
        }
 
-       if (bp->regview) {
-               iounmap(bp->regview);
-               bp->regview = NULL;
-       }
+       pci_iounmap(pdev, bp->regview);
+       bp->regview = NULL;
 
 err_out_release:
        pci_release_regions(pdev);
@@ -8432,7 +8424,7 @@ static int __devinit
 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        static int version_printed = 0;
-       struct net_device *dev = NULL;
+       struct net_device *dev;
        struct bnx2 *bp;
        int rc;
        char str[40];
@@ -8442,15 +8434,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
-
        if (!dev)
                return -ENOMEM;
 
        rc = bnx2_init_board(pdev, dev);
-       if (rc < 0) {
-               free_netdev(dev);
-               return rc;
-       }
+       if (rc < 0)
+               goto err_free;
 
        dev->netdev_ops = &bnx2_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
@@ -8480,22 +8469,21 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto error;
        }
 
-       netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
-                   board_info[ent->driver_data].name,
+       netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
+                   "node addr %pM\n", board_info[ent->driver_data].name,
                    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                    ((CHIP_ID(bp) & 0x0ff0) >> 4),
-                   bnx2_bus_string(bp, str),
-                   dev->base_addr,
-                   bp->pdev->irq, dev->dev_addr);
+                   bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
+                   pdev->irq, dev->dev_addr);
 
        return 0;
 
 error:
-       if (bp->regview)
-               iounmap(bp->regview);
+       iounmap(bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
+err_free:
        free_netdev(dev);
        return rc;
 }
@@ -8511,8 +8499,7 @@ bnx2_remove_one(struct pci_dev *pdev)
        del_timer_sync(&bp->timer);
        cancel_work_sync(&bp->reset_task);
 
-       if (bp->regview)
-               iounmap(bp->regview);
+       pci_iounmap(bp->pdev, bp->regview);
 
        kfree(bp->temp_stats_blk);
 
index 2c9ee552dffcccd5cb22fcfbbbc99dea7e309141..bfa78883d5c73424ad16082b820b1f4d04713b23 100644 (file)
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.72.10-0"
-#define DRV_MODULE_RELDATE      "2012/02/20"
+#define DRV_MODULE_VERSION      "1.72.17-0"
+#define DRV_MODULE_RELDATE      "2012/04/02"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
 #define BCM_DCBNL
 #endif
+
+
+#include "bnx2x_hsi.h"
+
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
 #include "../cnic_if.h"
@@ -815,6 +819,8 @@ struct bnx2x_common {
 #define CHIP_NUM_57800_MF              0x16a5
 #define CHIP_NUM_57810                 0x168e
 #define CHIP_NUM_57810_MF              0x16ae
+#define CHIP_NUM_57811                 0x163d
+#define CHIP_NUM_57811_MF              0x163e
 #define CHIP_NUM_57840                 0x168d
 #define CHIP_NUM_57840_MF              0x16ab
 #define CHIP_IS_E1(bp)                 (CHIP_NUM(bp) == CHIP_NUM_57710)
@@ -826,6 +832,8 @@ struct bnx2x_common {
 #define CHIP_IS_57800_MF(bp)           (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
 #define CHIP_IS_57810(bp)              (CHIP_NUM(bp) == CHIP_NUM_57810)
 #define CHIP_IS_57810_MF(bp)           (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57811(bp)              (CHIP_NUM(bp) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(bp)           (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
 #define CHIP_IS_57840(bp)              (CHIP_NUM(bp) == CHIP_NUM_57840)
 #define CHIP_IS_57840_MF(bp)           (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 #define CHIP_IS_E1H(bp)                        (CHIP_IS_57711(bp) || \
@@ -836,6 +844,8 @@ struct bnx2x_common {
                                         CHIP_IS_57800_MF(bp) || \
                                         CHIP_IS_57810(bp) || \
                                         CHIP_IS_57810_MF(bp) || \
+                                        CHIP_IS_57811(bp) || \
+                                        CHIP_IS_57811_MF(bp) || \
                                         CHIP_IS_57840(bp) || \
                                         CHIP_IS_57840_MF(bp))
 #define CHIP_IS_E1x(bp)                        (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
@@ -1300,6 +1310,7 @@ struct bnx2x {
 #define NO_ISCSI_FLAG                  (1 << 14)
 #define NO_FCOE_FLAG                   (1 << 15)
 #define BC_SUPPORTS_PFC_STATS          (1 << 17)
+#define USING_SINGLE_MSIX_FLAG         (1 << 20)
 
 #define NO_ISCSI(bp)           ((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)       ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1329,8 +1340,8 @@ struct bnx2x {
        struct bnx2x_common     common;
        struct bnx2x_port       port;
 
-       struct cmng_struct_per_port cmng;
-       u32                     vn_weight_sum;
+       struct cmng_init        cmng;
+
        u32                     mf_config[E1HVN_MAX];
        u32                     mf2_config[E2_FUNC_MAX];
        u32                     path_has_ovlan; /* E3 */
@@ -1371,7 +1382,6 @@ struct bnx2x {
 #define BNX2X_STATE_DIAG               0xe000
 #define BNX2X_STATE_ERROR              0xf000
 
-       int                     multi_mode;
 #define BNX2X_MAX_PRIORITY             8
 #define BNX2X_MAX_ENTRIES_PER_PRI      16
 #define BNX2X_MAX_COS                  3
index 4b054812713a7b3f9968d4b82310c171a6f4fff6..5a58cff78dc2f4e135505b54e946302c2dfbc364 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
-#include <linux/firmware.h>
 #include <linux/prefetch.h>
 #include "bnx2x_cmn.h"
 #include "bnx2x_init.h"
@@ -1212,16 +1211,15 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
 
 void bnx2x_free_irq(struct bnx2x *bp)
 {
-       if (bp->flags & USING_MSIX_FLAG)
+       if (bp->flags & USING_MSIX_FLAG &&
+           !(bp->flags & USING_SINGLE_MSIX_FLAG))
                bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
                                     CNIC_PRESENT + 1);
-       else if (bp->flags & USING_MSI_FLAG)
-               free_irq(bp->pdev->irq, bp->dev);
        else
-               free_irq(bp->pdev->irq, bp->dev);
+               free_irq(bp->dev->irq, bp->dev);
 }
 
-int bnx2x_enable_msix(struct bnx2x *bp)
+int __devinit bnx2x_enable_msix(struct bnx2x *bp)
 {
        int msix_vec = 0, i, rc, req_cnt;
 
@@ -1261,8 +1259,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
                rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
 
                if (rc) {
-                       BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
-                       return rc;
+                       BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+                       goto no_msix;
                }
                /*
                 * decrease number of queues by number of unallocated entries
@@ -1270,18 +1268,34 @@ int bnx2x_enable_msix(struct bnx2x *bp)
                bp->num_queues -= diff;
 
                BNX2X_DEV_INFO("New queue configuration set: %d\n",
-                                 bp->num_queues);
-       } else if (rc) {
-               /* fall to INTx if not enough memory */
-               if (rc == -ENOMEM)
-                       bp->flags |= DISABLE_MSI_FLAG;
+                              bp->num_queues);
+       } else if (rc > 0) {
+               /* Get by with single vector */
+               rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
+               if (rc) {
+                       BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
+                                      rc);
+                       goto no_msix;
+               }
+
+               BNX2X_DEV_INFO("Using single MSI-X vector\n");
+               bp->flags |= USING_SINGLE_MSIX_FLAG;
+
+       } else if (rc < 0) {
                BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
-               return rc;
+               goto no_msix;
        }
 
        bp->flags |= USING_MSIX_FLAG;
 
        return 0;
+
+no_msix:
+       /* fall to INTx if not enough memory */
+       if (rc == -ENOMEM)
+               bp->flags |= DISABLE_MSI_FLAG;
+
+       return rc;
 }
 
 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
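
[Editor's note: the no_msix handling added above leans on pci_enable_msix()
returning a positive count when fewer vectors are available than requested, so
rc > 0 means "retry with fewer" while rc < 0 is a hard failure. A condensed
sketch of that ladder, using generic names rather than the bnx2x function itself
and assuming the 3.x pci_enable_msix() semantics:]

 #include <linux/pci.h>

 /* foo_enable_msix() is a made-up helper for illustration only */
 static int foo_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
			    int want)
 {
	int rc = pci_enable_msix(pdev, tbl, want);

	if (rc > 0)	/* only 'rc' vectors free: settle for a single one */
		rc = pci_enable_msix(pdev, tbl, 1);

	return rc;	/* 0 on success; a negative errno lets the caller
			 * fall back to MSI or legacy INTx */
 }
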
@@ -1343,22 +1357,26 @@ int bnx2x_enable_msi(struct bnx2x *bp)
 static int bnx2x_req_irq(struct bnx2x *bp)
 {
        unsigned long flags;
-       int rc;
+       unsigned int irq;
 
-       if (bp->flags & USING_MSI_FLAG)
+       if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
                flags = 0;
        else
                flags = IRQF_SHARED;
 
-       rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
-                        bp->dev->name, bp->dev);
-       return rc;
+       if (bp->flags & USING_MSIX_FLAG)
+               irq = bp->msix_table[0].vector;
+       else
+               irq = bp->pdev->irq;
+
+       return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
 }
 
 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 {
        int rc = 0;
-       if (bp->flags & USING_MSIX_FLAG) {
+       if (bp->flags & USING_MSIX_FLAG &&
+           !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
                rc = bnx2x_req_msix_irqs(bp);
                if (rc)
                        return rc;
@@ -1371,8 +1389,13 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp)
                }
                if (bp->flags & USING_MSI_FLAG) {
                        bp->dev->irq = bp->pdev->irq;
-                       netdev_info(bp->dev, "using MSI  IRQ %d\n",
-                              bp->pdev->irq);
+                       netdev_info(bp->dev, "using MSI IRQ %d\n",
+                                   bp->dev->irq);
+               }
+               if (bp->flags & USING_MSIX_FLAG) {
+                       bp->dev->irq = bp->msix_table[0].vector;
+                       netdev_info(bp->dev, "using MSIX IRQ %d\n",
+                                   bp->dev->irq);
                }
        }
 
@@ -1437,20 +1460,11 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
        return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 }
 
+
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
-       switch (bp->multi_mode) {
-       case ETH_RSS_MODE_DISABLED:
-               bp->num_queues = 1;
-               break;
-       case ETH_RSS_MODE_REGULAR:
-               bp->num_queues = bnx2x_calc_num_queues(bp);
-               break;
-
-       default:
-               bp->num_queues = 1;
-               break;
-       }
+       /* RSS queues */
+       bp->num_queues = bnx2x_calc_num_queues(bp);
 
 #ifdef BCM_CNIC
        /* override in STORAGE SD mode */
@@ -1549,16 +1563,13 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
        u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
        u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
-       /*
-        * Prepare the inital contents fo the indirection table if RSS is
+       /* Prepare the initial contents fo the indirection table if RSS is
         * enabled
         */
-       if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
-               for (i = 0; i < sizeof(ind_table); i++)
-                       ind_table[i] =
-                               bp->fp->cl_id +
-                               ethtool_rxfh_indir_default(i, num_eth_queues);
-       }
+       for (i = 0; i < sizeof(ind_table); i++)
+               ind_table[i] =
+                       bp->fp->cl_id +
+                       ethtool_rxfh_indir_default(i, num_eth_queues);
 
        /*
         * For 57710 and 57711 SEARCHER configuration (rss_keys) is
@@ -1568,11 +1579,12 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
         * For 57712 and newer on the other hand it's a per-function
         * configuration.
         */
-       return bnx2x_config_rss_pf(bp, ind_table,
-                                  bp->port.pmf || !CHIP_IS_E1x(bp));
+       return bnx2x_config_rss_eth(bp, ind_table,
+                                   bp->port.pmf || !CHIP_IS_E1x(bp));
 }
 
-int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+                       u8 *ind_table, bool config_hash)
 {
        struct bnx2x_config_rss_params params = {NULL};
        int i;
@@ -1584,52 +1596,29 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
         */
 
-       params.rss_obj = &bp->rss_conf_obj;
+       params.rss_obj = rss_obj;
 
        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 
-       /* RSS mode */
-       switch (bp->multi_mode) {
-       case ETH_RSS_MODE_DISABLED:
-               __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
-               break;
-       case ETH_RSS_MODE_REGULAR:
-               __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-               break;
-       case ETH_RSS_MODE_VLAN_PRI:
-               __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
-               break;
-       case ETH_RSS_MODE_E1HOV_PRI:
-               __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
-               break;
-       case ETH_RSS_MODE_IP_DSCP:
-               __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
-               break;
-       default:
-               BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
-               return -EINVAL;
-       }
+       __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 
-       /* If RSS is enabled */
-       if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
-               /* RSS configuration */
-               __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
-               __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
-               __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
-               __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+       /* RSS configuration */
+       __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+       __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+       __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+       __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
 
-               /* Hash bits */
-               params.rss_result_mask = MULTI_MASK;
+       /* Hash bits */
+       params.rss_result_mask = MULTI_MASK;
 
-               memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+       memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
 
-               if (config_hash) {
-                       /* RSS keys */
-                       for (i = 0; i < sizeof(params.rss_key) / 4; i++)
-                               params.rss_key[i] = random32();
+       if (config_hash) {
+               /* RSS keys */
+               for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+                       params.rss_key[i] = random32();
 
-                       __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
-               }
+               __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
        }
 
        return bnx2x_config_rss(bp, &params);
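
The default table built just above in bnx2x_init_rss_pf() leans on ethtool_rxfh_indir_default(), which spreads entries round-robin over the RX rings (index % n_rx_rings), offset here by the first client id. A standalone sketch of that seeding; the table size, client id and queue count below are illustrative stand-ins, not values taken from the driver:

#include <stdio.h>

/* Same spreading rule as the kernel's ethtool_rxfh_indir_default(). */
static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
        return index % n_rx_rings;
}

int main(void)
{
        unsigned int ind_table[128];    /* stands in for T_ETH_INDIRECTION_TABLE_SIZE */
        unsigned int i, cl_id = 16, num_eth_queues = 4;

        for (i = 0; i < 128; i++)
                ind_table[i] = cl_id + rxfh_indir_default(i, num_eth_queues);

        /* entries cycle 16,17,18,19,16,... so flows spread across all queues */
        printf("%u %u %u %u %u\n", ind_table[0], ind_table[1],
               ind_table[2], ind_table[3], ind_table[4]);
        return 0;
}
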
index 5c27454d2ec276c81e31cb09b7241b158db68441..2c3a243c84b3ab17b4ae6483c1c72c09604de748 100644 (file)
@@ -86,13 +86,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
 void bnx2x_send_unload_done(struct bnx2x *bp);
 
 /**
- * bnx2x_config_rss_pf - configure RSS parameters.
+ * bnx2x_config_rss_pf - configure RSS parameters in a PF.
  *
  * @bp:                        driver handle
+ * @rss_obj:           RSS object to use
  * @ind_table:         indirection table to configure
  * @config_hash:       re-configure RSS hash keys configuration
  */
-int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+                       u8 *ind_table, bool config_hash);
 
 /**
  * bnx2x__init_func_obj - init function object
@@ -485,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
  * fills msix_table, requests vectors, updates num_queues
  * according to number of available vectors.
  */
-int bnx2x_enable_msix(struct bnx2x *bp);
+int __devinit bnx2x_enable_msix(struct bnx2x *bp);
 
 /**
  * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -843,7 +845,7 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
 {
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
-               bp->flags &= ~USING_MSIX_FLAG;
+               bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
@@ -964,6 +966,19 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
 
 /************************* Init ******************************************/
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+       return 2 * vn + BP_PORT(bp);
+}
+
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
+                                      bool config_hash)
+{
+       return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
+                                  config_hash);
+}
+
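
For orientation: func_by_vn() interleaves PCI functions across the two ports of a path, so on port 1 the resources of VN 2 land on function 2*2+1 = 5; the reworked storm_memset_cmng() below relies on this when computing the per-function XSTORM offsets for each VN.
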
 /**
  * bnx2x_func_start - init function
  *
@@ -1419,15 +1434,32 @@ static inline void storm_memset_func_cfg(struct bnx2x *bp,
 }
 
 static inline void storm_memset_cmng(struct bnx2x *bp,
-                               struct cmng_struct_per_port *cmng,
+                               struct cmng_init *cmng,
                                u8 port)
 {
+       int vn;
        size_t size = sizeof(struct cmng_struct_per_port);
 
        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
 
-       __storm_memset_struct(bp, addr, size, (u32 *)cmng);
+       __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
+
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+               int func = func_by_vn(bp, vn);
+
+               addr = BAR_XSTRORM_INTMEM +
+                      XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
+               size = sizeof(struct rate_shaping_vars_per_vn);
+               __storm_memset_struct(bp, addr, size,
+                                     (u32 *)&cmng->vnic.vnic_max_rate[vn]);
+
+               addr = BAR_XSTRORM_INTMEM +
+                      XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
+               size = sizeof(struct fairness_vars_per_vn);
+               __storm_memset_struct(bp, addr, size,
+                                     (u32 *)&cmng->vnic.vnic_min_rate[vn]);
+       }
 }
 
 /**
@@ -1608,11 +1640,6 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
  */
 void bnx2x_get_iscsi_info(struct bnx2x *bp);
 #endif
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-       return 2 * vn + BP_PORT(bp);
-}
 
 /**
  * bnx2x_link_sync_notify - send notification to other functions.
index 2cc0a1703970d9b5ae3bd22dc9cac4cfefa4a8de..3c7d0cc77e23c50306cdf51cfedf9ec31ccde027 100644 (file)
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/crc32.h>
-
-
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
 #include "bnx2x_init.h"
-#include "bnx2x_sp.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -2396,10 +2393,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 
 static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-
-       return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
-               0 : T_ETH_INDIRECTION_TABLE_SIZE);
+       return T_ETH_INDIRECTION_TABLE_SIZE;
 }
 
 static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
@@ -2445,7 +2439,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
                ind_table[i] = indir[i] + bp->fp->cl_id;
        }
 
-       return bnx2x_config_rss_pf(bp, ind_table, false);
+       return bnx2x_config_rss_eth(bp, ind_table, false);
 }
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
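
With the multi_mode check gone, get_rxfh_indir_size() always reports the full T_ETH_INDIRECTION_TABLE_SIZE, so the RSS indirection table stays visible and writable from userspace. For example (the device name is illustrative), "ethtool -x eth0" dumps the table and "ethtool -X eth0 equal 4" rewrites it evenly over four rings; the write lands in bnx2x_set_rxfh_indir() above and from there in bnx2x_config_rss_eth() with config_hash false, so the hash keys are left untouched.
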
index dbff5915b81a3b998f41d1f9bed232421417791b..799272d164e5f74dfa28c5e8b86d01ef36b3db9a 100644 (file)
@@ -4448,6 +4448,65 @@ struct cmng_struct_per_port {
        struct cmng_flags_per_port flags;
 };
 
+/*
+ * a single rate shaping counter. can be used as protocol or vnic counter
+ */
+struct rate_shaping_counter {
+       u32 quota;
+#if defined(__BIG_ENDIAN)
+       u16 __reserved0;
+       u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+       u16 rate;
+       u16 __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+       struct rate_shaping_counter vn_counter;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+       u32 cos_credit_delta[MAX_COS_NUMBER];
+       u32 vn_credit_delta;
+       u32 __reserved0;
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_vnic {
+       struct rate_shaping_vars_per_vn vnic_max_rate[4];
+       struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init {
+       struct cmng_struct_per_port port;
+       struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input {
+       u32 port_rate;
+       u16 vnic_min_rate[4];
+       u16 vnic_max_rate[4];
+       u16 cos_min_rate[MAX_COS_NUMBER];
+       u16 cos_to_pause_mask[MAX_COS_NUMBER];
+       struct cmng_flags_per_port flags;
+};
+
 
 /*
  * Protocol-common command ID for slow path elements
@@ -4762,16 +4821,6 @@ enum fairness_mode {
 };
 
 
-/*
- * per-vnic fairness variables
- */
-struct fairness_vars_per_vn {
-       u32 cos_credit_delta[MAX_COS_NUMBER];
-       u32 vn_credit_delta;
-       u32 __reserved0;
-};
-
-
 /*
  * Priority and cos
  */
@@ -5139,29 +5188,6 @@ struct protocol_common_spe {
 };
 
 
-/*
- * a single rate shaping counter. can be used as protocol or vnic counter
- */
-struct rate_shaping_counter {
-       u32 quota;
-#if defined(__BIG_ENDIAN)
-       u16 __reserved0;
-       u16 rate;
-#elif defined(__LITTLE_ENDIAN)
-       u16 rate;
-       u16 __reserved0;
-#endif
-};
-
-
-/*
- * per-vnic rate shaping variables
- */
-struct rate_shaping_vars_per_vn {
-       struct rate_shaping_counter vn_counter;
-};
-
-
 /*
  * The send queue element
  */
index 29f5c3cca31a1a6b809749c560058a89341f5e6d..2b7a2bd0592c5962325bd7592a281aeb6554e153 100644 (file)
@@ -241,7 +241,8 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
                        REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
 
                        /* set/clear queue bit in command-queue bit map
-                       (E2/E3A0 only, valid COS values are 0/1) */
+                        * (E2/E3A0 only, valid COS values are 0/1)
+                        */
                        if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
                                reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
                                reg_bit_map = REG_RD(bp, reg_addr);
@@ -277,7 +278,215 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
 }
 
 
-/* Returns the index of start or end of a specific block stage in ops array*/
+/* congestion management port init api description
+ * the api works as follows:
+ * the driver should pass the cmng_init_input struct, the port_init function
+ * will prepare the required internal ram structure which will be passed back
+ * to the driver (cmng_init) that will write it into the internal ram.
+ *
+ * IMPORTANT REMARKS:
+ * 1. the cmng_init struct does not represent the contiguous internal ram
+ *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
+ *    offset in order to write the port sub struct and the
+ *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
+ *    words - don't use memcpy!).
+ * 2. although the cmng_init struct is filled for the maximal vnic number
+ *    possible, the driver should only write the valid vnics into the internal
+ *    ram according to the appropriate port mode.
+ */
+#define BITS_TO_BYTES(x) ((x)/8)
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/* how many bytes above threshold for
+ * the minimal credit of Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
+                                 u32 r_param, struct cmng_init *ram_data)
+{
+       u32 vnic;
+       struct cmng_vnic *vdata = &ram_data->vnic;
+       struct cmng_struct_per_port *pdata = &ram_data->port;
+       /* rate shaping per-port variables
+        * 100 micro seconds in SDM ticks = 25
+        * since each tick is 4 microSeconds
+        */
+
+       pdata->rs_vars.rs_periodic_timeout =
+       RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+       /* this is the threshold below which no timer arming will occur.
+        * 1.25 coefficient is for the threshold to be a little bigger
+        * than the real time to compensate for timer inaccuracy
+        */
+       pdata->rs_vars.rs_threshold =
+       (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+       /* rate shaping per-vnic variables */
+       for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+               /* global vnic counter */
+               vdata->vnic_max_rate[vnic].vn_counter.rate =
+               input_data->vnic_max_rate[vnic];
+               /* maximal Mbps for this vnic
+                * the quota in each timer period - number of bytes
+                * transmitted in this period
+                */
+               vdata->vnic_max_rate[vnic].vn_counter.quota =
+                       RS_PERIODIC_TIMEOUT_USEC *
+                       (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
+       }
+
+}
+
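
As a rough sanity check of the constants above, assuming the 10G numbers the comments mention: r_param = BITS_TO_BYTES(10000) = 1250 bytes/usec, so rs_threshold = (5 * 400 * 1250) / 4 = 625000 bytes (the 1.25 safety factor applied to one 400 usec period), and a vNIC capped at 10000 Mbps gets a quota of 400 * 10000 / 8 = 500000 bytes per period, i.e. exactly line rate.
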
+static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
+                                 u32 r_param, struct cmng_init *ram_data)
+{
+       u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+       struct cmng_vnic *vdata = &ram_data->vnic;
+       struct cmng_struct_per_port *pdata = &ram_data->port;
+
+       /* this is the resolution of the fairness timer */
+       fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+       /* fairness per-port variables
+        * for 10G it is 1000usec. for 1G it is 10000usec.
+        */
+       tFair = T_FAIR_COEF / input_data->port_rate;
+
+       /* this is the threshold below which we won't arm the timer anymore */
+       pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+       /* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
+        * to exceed T_FAIR*FAIR_MEM (the algorithm resolution)
+        */
+       pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+       /* since each tick is 4 microSeconds */
+       pdata->fair_vars.fairness_timeout =
+                               fair_periodic_timeout_usec / SDM_TICKS;
+
+       /* calculate sum of weights */
+       vnicWeightSum = 0;
+
+       for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
+               vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+       /* global vnic counter */
+       if (vnicWeightSum > 0) {
+               /* fairness per-vnic variables */
+               for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+                       /* this is the credit for each period of the fairness
+                        * algorithm - number of bytes in T_FAIR (this vnic
+                        * share of the port rate)
+                        */
+                       vdata->vnic_min_rate[vnic].vn_credit_delta =
+                               (u32)input_data->vnic_min_rate[vnic] * 100 *
+                               (T_FAIR_COEF / (8 * 100 * vnicWeightSum));
+                       if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+                           pdata->fair_vars.fair_threshold +
+                           MIN_ABOVE_THRESH) {
+                               vdata->vnic_min_rate[vnic].vn_credit_delta =
+                                       pdata->fair_vars.fair_threshold +
+                                       MIN_ABOVE_THRESH;
+                       }
+               }
+       }
+}
+
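
Continuing the same assumed 10G example through the fairness path: T_FAIR_COEF evaluates to (32768 + 160000) * 8 * 100 = 154214400, so with four vNICs of equal weight (vnic_min_rate = 25 each, vnicWeightSum = 100) each vNIC gets vn_credit_delta = 25 * 100 * (154214400 / 80000) = 2500 * 1927 = 4817500 bytes of credit per fairness window (integer division), comfortably above the fair_threshold + MIN_ABOVE_THRESH floor of 160000 + 32768 = 192768 bytes.
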
+static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
+                                    u32 r_param, struct cmng_init *ram_data)
+{
+       u32 vnic, cos;
+       u32 cosWeightSum = 0;
+       struct cmng_vnic *vdata = &ram_data->vnic;
+       struct cmng_struct_per_port *pdata = &ram_data->port;
+
+       for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+               cosWeightSum += input_data->cos_min_rate[cos];
+
+       if (cosWeightSum > 0) {
+
+               for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+                       /* Since cos and vnic shouldn't work together the rate
+                        * to divide between the coses is the port rate.
+                        */
+                       u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+                       for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+                               /* this is the credit for each period of
+                                * the fairness algorithm - number of bytes
+                                * in T_FAIR (this cos share of the vnic rate)
+                                */
+                               ccd[cos] =
+                                   (u32)input_data->cos_min_rate[cos] * 100 *
+                                   (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+                                if (ccd[cos] < pdata->fair_vars.fair_threshold
+                                               + MIN_ABOVE_THRESH) {
+                                       ccd[cos] =
+                                           pdata->fair_vars.fair_threshold +
+                                           MIN_ABOVE_THRESH;
+                               }
+                       }
+               }
+       }
+}
+
+static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
+                                  struct cmng_init *ram_data)
+{
+       /* in microSeconds */
+       ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
+                                  struct cmng_init *ram_data)
+{
+       u32 r_param;
+       memset(ram_data, 0, sizeof(struct cmng_init));
+
+       ram_data->port.flags = input_data->flags;
+
+       /* number of bytes transmitted at a rate of 10Gbps
+        * in one usec = 1.25KB.
+        */
+       r_param = BITS_TO_BYTES(input_data->port_rate);
+       bnx2x_init_max(input_data, r_param, ram_data);
+       bnx2x_init_min(input_data, r_param, ram_data);
+       bnx2x_init_fw_wrr(input_data, r_param, ram_data);
+       bnx2x_init_safc(input_data, ram_data);
+}
+
+
+
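
Putting the new API together with the storm_memset_cmng() rework earlier in this merge: the driver fills a cmng_init_input, lets bnx2x_init_cmng() derive the per-port and per-vNIC RAM images, and then writes each sub-struct at its own XSTORM offset (the port vars once, the per-VN rate-shaping and fairness vars at the function returned by func_by_vn()), never as one contiguous copy, exactly as the IMPORTANT REMARKS above demand. A rough caller sketch, not a standalone program; the rate values are made up for illustration and bp stands for an already-probed device:

        struct cmng_init_input input = { .port_rate = 10000 }; /* 10G port, everything else zeroed */
        struct cmng_init ram;
        int vn;

        for (vn = 0; vn < 4; vn++) {
                input.vnic_min_rate[vn] = 25;    /* equal fairness weights */
                input.vnic_max_rate[vn] = 10000; /* no per-vNIC cap below line rate */
        }

        bnx2x_init_cmng(&input, &ram);            /* fills ram.port and ram.vnic */
        storm_memset_cmng(bp, &ram, BP_PORT(bp)); /* piecewise write into internal RAM */
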
+/* Returns the index of start or end of a specific block stage in ops array */
 #define BLOCK_OPS_IDX(block, stage, end) \
                        (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
 
@@ -499,9 +708,7 @@ static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
        bnx2x_set_mcp_parity(bp, false);
 }
 
-/**
- * Clear the parity error status registers.
- */
+/* Clear the parity error status registers. */
 static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
 {
        int i;
index ad95324dc0420681c21e12ead836eafdceb35a59..ff882a4820941ee3ea0dbab409cadde9fb676dc6 100644 (file)
 
 
 
-/* */
 #define SFP_EEPROM_CON_TYPE_ADDR               0x2
        #define SFP_EEPROM_CON_TYPE_VAL_LC      0x7
        #define SFP_EEPROM_CON_TYPE_VAL_COPPER  0x21
@@ -404,8 +403,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
 
        DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
 
-       /*
-        * mapping between entry  priority to client number (0,1,2 -debug and
+       /* mapping between entry  priority to client number (0,1,2 -debug and
         * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
         * 3bits client num.
         *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -413,8 +411,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
         */
 
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-       /*
-        * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+       /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries, 3 -
         * COS0 entry, 4 - COS1 entry.
         * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -425,13 +422,11 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
        /* defines which entries (clients) are subjected to WFQ arbitration */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-       /*
-        * For strict priority entries defines the number of consecutive
+       /* For strict priority entries defines the number of consecutive
         * slots for the highest priority.
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-       /*
-        * mapping between the CREDIT_WEIGHT registers and actual client
+       /* mapping between the CREDIT_WEIGHT registers and actual client
         * numbers
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
@@ -443,8 +438,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
        REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
        /* ETS mode disable */
        REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-       /*
-        * If ETS mode is enabled (there is no strict priority) defines a WFQ
+       /* If ETS mode is enabled (there is no strict priority) defines a WFQ
         * weight for COS0/COS1.
         */
        REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
@@ -471,10 +465,9 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
                        min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
        } else
                min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-       /**
-        *  If the link isn't up (static configuration for example ) The
-        *  link will be according to 20GBPS.
-       */
+       /* If the link isn't up (static configuration for example), the
+        * link will be assumed to be 20GBPS.
+        */
        return min_w_val;
 }
 /******************************************************************************
@@ -538,8 +531,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
        struct bnx2x *bp = params->bp;
        const u8 port = params->port;
        const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
-       /**
-        * mapping between entry  priority to client number (0,1,2 -debug and
+       /* Mapping between entry  priority to client number (0,1,2 -debug and
         * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
         * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
         * reset value or init tool
@@ -551,18 +543,14 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
                REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
                REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
        }
-       /**
-       * For strict priority entries defines the number of consecutive
-       * slots for the highest priority.
-       */
-       /* TODO_ETS - Should be done by reset value or init tool */
+       /* For strict priority entries defines the number of consecutive
+        * slots for the highest priority.
+        */
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
                   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-       /**
-        * mapping between the CREDIT_WEIGHT registers and actual client
+       /* Mapping between the CREDIT_WEIGHT registers and actual client
         * numbers
         */
-       /* TODO_ETS - Should be done by reset value or init tool */
        if (port) {
                /*Port 1 has 6 COS*/
                REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
@@ -574,8 +562,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
                REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
        }
 
-       /**
-        * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+       /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries, 3 -
         * COS0 entry, 4 - COS1 entry.
         * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -590,13 +577,12 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
                   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
 
-       /**
-       * Please notice the register address are note continuous and a
-       * for here is note appropriate.In 2 port mode port0 only COS0-5
-       * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
-       * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
-       * are never used for WFQ
-       */
+       /* Please notice the register addresses are not continuous and a
+        * for loop here is not appropriate. In 2 port mode port0 only COS0-5
+        * can be used; DEBUG1,DEBUG1,MGMT are never used for WFQ. In 4
+        * port mode port1 only COS0-2 can be used; DEBUG1,DEBUG1,MGMT
+        * are never used for WFQ.
+        */
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
                   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
@@ -633,10 +619,9 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
        u32 base_upper_bound = 0;
        u8 max_cos = 0;
        u8 i = 0;
-       /**
-       * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
-       * port mode port1 has COS0-2 that can be used for WFQ.
-       */
+       /* In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+        * port mode port1 has COS0-2 that can be used for WFQ.
+        */
        if (!port) {
                base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
                max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -666,8 +651,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
        u32 base_weight = 0;
        u8 max_cos = 0;
 
-       /**
-        * mapping between entry  priority to client number 0 - COS0
+       /* Mapping between entry  priority to client number 0 - COS0
         * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
         * TODO_ETS - Should be done by reset value or init tool
         */
@@ -695,10 +679,9 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
 
        REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
                   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
-       /**
-       * In 2 port mode port0 has COS0-5 that can be used for WFQ.
-       * In 4 port mode port1 has COS0-2 that can be used for WFQ.
-       */
+       /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
+        * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+        */
        if (!port) {
                base_weight = PBF_REG_COS0_WEIGHT_P0;
                max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -738,7 +721,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
 /******************************************************************************
 * Description:
 *      Disable will return basicly the values to init values.
-*.
+*
 ******************************************************************************/
 int bnx2x_ets_disabled(struct link_params *params,
                      struct link_vars *vars)
@@ -867,7 +850,7 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 /******************************************************************************
 * Description:
 *      Calculate the total BW.A value of 0 isn't legal.
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_get_total_bw(
        const struct link_params *params,
@@ -879,7 +862,6 @@ static int bnx2x_ets_e3b0_get_total_bw(
        u8 is_bw_cos_exist = 0;
 
        *total_bw = 0 ;
-
        /* Calculate total BW requested */
        for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
                if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
@@ -887,10 +869,9 @@ static int bnx2x_ets_e3b0_get_total_bw(
                        if (!ets_params->cos[cos_idx].params.bw_params.bw) {
                                DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
                                                   "was set to 0\n");
-                               /*
-                                * This is to prevent a state when ramrods
+                               /* This is to prevent a state when ramrods
                                 * can't be sent
-                               */
+                                */
                                ets_params->cos[cos_idx].params.bw_params.bw
                                         = 1;
                        }
@@ -908,8 +889,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
                }
                DP(NETIF_MSG_LINK,
                   "bnx2x_ets_E3B0_config total BW should be 100\n");
-               /*
-                * We can handle a case whre the BW isn't 100 this can happen
+                * We can handle a case where the BW isn't 100; this can happen
                 * if the TC are joined.
                 */
        }
@@ -919,7 +899,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
 /******************************************************************************
 * Description:
 *      Invalidate all the sp_pri_to_cos.
-*.
+*
 ******************************************************************************/
 static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
 {
@@ -931,7 +911,7 @@ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
 * Description:
 *      Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *      according to sp_pri_to_cos.
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
                                            u8 *sp_pri_to_cos, const u8 pri,
@@ -964,7 +944,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
 * Description:
 *      Returns the correct value according to COS and priority in
 *      the sp_pri_cli register.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
                                         const u8 pri_set,
@@ -981,7 +961,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
 * Description:
 *      Returns the correct value according to COS and priority in the
 *      sp_pri_cli register for NIG.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
 {
@@ -997,7 +977,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
 * Description:
 *      Returns the correct value according to COS and priority in the
 *      sp_pri_cli register for PBF.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
 {
@@ -1013,7 +993,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
 * Description:
 *      Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *      according to sp_pri_to_cos.(which COS has higher priority)
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
                                             u8 *sp_pri_to_cos)
@@ -1149,8 +1129,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
                return -EINVAL;
        }
 
-       /*
-        * Upper bound is set according to current link speed (min_w_val
+       /* Upper bound is set according to current link speed (min_w_val
         * should be the same for upper bound and COS credit val).
         */
        bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
@@ -1160,8 +1139,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
                if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
                        cos_bw_bitmap |= (1 << cos_entry);
-                       /*
-                        * The function also sets the BW in HW(not the mappin
+                       /* The function also sets the BW in HW (not the mapping
                         * yet)
                         */
                        bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
@@ -1217,14 +1195,12 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
        /* ETS disabled configuration */
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-       /*
-        * defines which entries (clients) are subjected to WFQ arbitration
+       /* Defines which entries (clients) are subjected to WFQ arbitration
         * COS0 0x8
         * COS1 0x10
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-       /*
-        * mapping between the ARB_CREDIT_WEIGHT registers and actual
+       /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
         * client numbers (WEIGHT_0 does not actually have to represent
         * client 0)
         *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -1242,8 +1218,7 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 
        /* Defines the number of consecutive slots for the strict priority */
        REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-       /*
-        * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+       /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
         * entry, 4 - COS1 entry.
         * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1298,8 +1273,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        u32 val = 0;
 
        DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-       /*
-        * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+       /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries,
         * 3 - COS0 entry, 4 - COS1 entry.
         *  COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1307,8 +1281,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
         * MCP and debug are strict
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-       /*
-        * For strict priority entries defines the number of consecutive slots
+       /* For strict priority entries defines the number of consecutive slots
         * for the highest priority.
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
@@ -1320,8 +1293,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        /* Defines the number of consecutive slots for the strict priority */
        REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 
-       /*
-        * mapping between entry  priority to client number (0,1,2 -debug and
+       /* Mapping between entry  priority to client number (0,1,2 -debug and
         * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
         * 3bits client num.
         *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -1356,15 +1328,12 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
        if (!(params->feature_config_flags &
              FEATURE_CONFIG_PFC_ENABLED)) {
 
-               /*
-                * RX flow control - Process pause frame in receive direction
+               /* RX flow control - Process pause frame in receive direction
                 */
                if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
                        pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
 
-               /*
-                * TX flow control - Send pause packet when buffer is full
-                */
+               /* TX flow control - Send pause packet when buffer is full */
                if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
                        pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
        } else {/* PFC support */
@@ -1457,8 +1426,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
 {
        u32 mode, emac_base;
-       /**
-        * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+       /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
 
@@ -1578,15 +1546,6 @@ static void bnx2x_umac_enable(struct link_params *params,
 
        DP(NETIF_MSG_LINK, "enabling UMAC\n");
 
-       /**
-        * This register determines on which events the MAC will assert
-        * error on the i/f to the NIG along w/ EOP.
-        */
-
-       /**
-        * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
-        * params->port*0x14,      0xfffff.
-        */
        /* This register opens the gate for the UMAC despite its name */
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 
@@ -1649,8 +1608,7 @@ static void bnx2x_umac_enable(struct link_params *params,
                val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
        REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 
-       /*
-        * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+       /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
         * length used by the MAC receive logic to check frames.
         */
        REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -1666,8 +1624,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
        struct bnx2x *bp = params->bp;
        u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 
-       /*
-        * In 4-port mode, need to set the mode only once, so if XMAC is
+       /* In 4-port mode, need to set the mode only once, so if XMAC is
         * already out of reset, it means the mode has already been set,
         * and it must not* reset the XMAC again, since it controls both
         * ports of the path
@@ -1691,13 +1648,13 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
        if (is_port4mode) {
                DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
 
-               /*  Set the number of ports on the system side to up to 2 */
+               /* Set the number of ports on the system side to up to 2 */
                REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
 
                /* Set the number of ports on the Warp Core to 10G */
                REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
        } else {
-               /*  Set the number of ports on the system side to 1 */
+               /* Set the number of ports on the system side to 1 */
                REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
                if (max_speed == SPEED_10000) {
                        DP(NETIF_MSG_LINK,
@@ -1729,8 +1686,7 @@ static void bnx2x_xmac_disable(struct link_params *params)
 
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
            MISC_REGISTERS_RESET_REG_2_XMAC) {
-               /*
-                * Send an indication to change the state in the NIG back to XON
+               /* Send an indication to change the state in the NIG back to XON
                 * Clearing this bit enables the next set of this bit to get
                 * rising edge
                 */
@@ -1755,13 +1711,11 @@ static int bnx2x_xmac_enable(struct link_params *params,
 
        bnx2x_xmac_init(params, vars->line_speed);
 
-       /*
-        * This register determines on which events the MAC will assert
+       /* This register determines on which events the MAC will assert
         * error on the i/f to the NIG along w/ EOP.
         */
 
-       /*
-        * This register tells the NIG whether to send traffic to UMAC
+       /* This register tells the NIG whether to send traffic to UMAC
         * or XMAC
         */
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
@@ -1863,8 +1817,7 @@ static int bnx2x_emac_enable(struct link_params *params,
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
        val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 
-       /*
-        * Setting this bit causes MAC control frames (except for pause
+       /* Setting this bit causes MAC control frames (except for pause
         * frames) to be passed on for processing. This setting has no
         * affect on the operation of the pause frames. This bit effects
         * all packets regardless of RX Parser packet sorting logic.
@@ -1963,8 +1916,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
                                   struct link_vars *vars,
                                   u8 is_lb)
 {
-       /*
-        * Set rx control: Strip CRC and enable BigMAC to relay
+       /* Set rx control: Strip CRC and enable BigMAC to relay
         * control packets to the system as well
         */
        u32 wb_data[2];
@@ -2016,8 +1968,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 
-       /*
-        * Set Time (based unit is 512 bit time) between automatic
+       /* Set Time (based unit is 512 bit time) between automatic
         * re-sending of PP packets and enable automatic re-send of
         * Per-Priority Packet as long as pp_gen is asserted and
         * pp_disable is low.
@@ -2086,7 +2037,7 @@ static int bnx2x_pfc_brb_get_config_params(
        config_val->default_class1.full_xon = 0;
 
        if (CHIP_IS_E2(bp)) {
-               /*  class0 defaults */
+               /* Class0 defaults */
                config_val->default_class0.pause_xoff =
                        DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
                config_val->default_class0.pause_xon =
@@ -2095,7 +2046,7 @@ static int bnx2x_pfc_brb_get_config_params(
                        DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
                config_val->default_class0.full_xon =
                        DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
-               /*  pause able*/
+               /* Pause able*/
                config_val->pauseable_th.pause_xoff =
                        PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                config_val->pauseable_th.pause_xon =
@@ -2114,7 +2065,7 @@ static int bnx2x_pfc_brb_get_config_params(
                config_val->non_pauseable_th.full_xon =
                        PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
        } else if (CHIP_IS_E3A0(bp)) {
-               /*  class0 defaults */
+               /* Class0 defaults */
                config_val->default_class0.pause_xoff =
                        DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
                config_val->default_class0.pause_xon =
@@ -2123,7 +2074,7 @@ static int bnx2x_pfc_brb_get_config_params(
                        DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
                config_val->default_class0.full_xon =
                        DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
-               /*  pause able */
+               /* Pause able */
                config_val->pauseable_th.pause_xoff =
                        PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                config_val->pauseable_th.pause_xon =
@@ -2142,7 +2093,7 @@ static int bnx2x_pfc_brb_get_config_params(
                config_val->non_pauseable_th.full_xon =
                        PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
        } else if (CHIP_IS_E3B0(bp)) {
-               /*  class0 defaults */
+               /* Class0 defaults */
                config_val->default_class0.pause_xoff =
                        DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
                config_val->default_class0.pause_xon =
@@ -2305,27 +2256,23 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
                        reg_th_config = &config_val.non_pauseable_th;
        } else
                reg_th_config = &config_val.default_class0;
-       /*
-        * The number of free blocks below which the pause signal to class 0
+       /* The number of free blocks below which the pause signal to class 0
         * of MAC #n is asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
               BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
               reg_th_config->pause_xoff);
-       /*
-        * The number of free blocks above which the pause signal to class 0
+       /* The number of free blocks above which the pause signal to class 0
         * of MAC #n is de-asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
               BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
-       /*
-        * The number of free blocks below which the full signal to class 0
+       /* The number of free blocks below which the full signal to class 0
         * of MAC #n is asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
               BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
-       /*
-        * The number of free blocks above which the full signal to class 0
+       /* The number of free blocks above which the full signal to class 0
         * of MAC #n is de-asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
@@ -2339,30 +2286,26 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
                        reg_th_config = &config_val.non_pauseable_th;
        } else
                reg_th_config = &config_val.default_class1;
-       /*
-        * The number of free blocks below which the pause signal to
+       /* The number of free blocks below which the pause signal to
         * class 1 of MAC #n is asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
               BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
               reg_th_config->pause_xoff);
 
-       /*
-        * The number of free blocks above which the pause signal to
+       /* The number of free blocks above which the pause signal to
         * class 1 of MAC #n is de-asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
               BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
               reg_th_config->pause_xon);
-       /*
-        * The number of free blocks below which the full signal to
+       /* The number of free blocks below which the full signal to
         * class 1 of MAC #n is asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
               BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
               reg_th_config->full_xoff);
-       /*
-        * The number of free blocks above which the full signal to
+       /* The number of free blocks above which the full signal to
         * class 1 of MAC #n is de-asserted. n=0,1
         */
        REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
@@ -2379,49 +2322,41 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
                REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
                           e3b0_val.per_class_guaranty_mode);
 
-               /*
-                * The hysteresis on the guarantied buffer space for the Lb
+               /* The hysteresis on the guarantied buffer space for the Lb
                 * port before signaling XON.
                 */
                REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
                           e3b0_val.lb_guarantied_hyst);
 
-               /*
-                * The number of free blocks below which the full signal to the
+               /* The number of free blocks below which the full signal to the
                 * LB port is asserted.
                 */
                REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
                       e3b0_val.full_lb_xoff_th);
-               /*
-                * The number of free blocks above which the full signal to the
+               /* The number of free blocks above which the full signal to the
                 * LB port is de-asserted.
                 */
                REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
                       e3b0_val.full_lb_xon_threshold);
-               /*
-                * The number of blocks guarantied for the MAC #n port. n=0,1
+               /* The number of blocks guarantied for the MAC #n port. n=0,1
                 */
 
-               /* The number of blocks guarantied for the LB port.*/
+               /* The number of blocks guarantied for the LB port. */
                REG_WR(bp, BRB1_REG_LB_GUARANTIED,
                       e3b0_val.lb_guarantied);
 
-               /*
-                * The number of blocks guarantied for the MAC #n port.
-                */
+               /* The number of blocks guarantied for the MAC #n port. */
                REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
                       2 * e3b0_val.mac_0_class_t_guarantied);
                REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
                       2 * e3b0_val.mac_1_class_t_guarantied);
-               /*
-                * The number of blocks guarantied for class #t in MAC0. t=0,1
+               /* The number of blocks guarantied for class #t in MAC0. t=0,1
                 */
                REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
                       e3b0_val.mac_0_class_t_guarantied);
                REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
                       e3b0_val.mac_0_class_t_guarantied);
-               /*
-                * The hysteresis on the guarantied buffer space for class in
+               /* The hysteresis on the guarantied buffer space for class in
                 * MAC0.  t=0,1
                 */
                REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
@@ -2429,15 +2364,13 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
                REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
                       e3b0_val.mac_0_class_t_guarantied_hyst);
 
-               /*
-                * The number of blocks guarantied for class #t in MAC1.t=0,1
+               /* The number of blocks guarantied for class #t in MAC1.t=0,1
                 */
                REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
                       e3b0_val.mac_1_class_t_guarantied);
                REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
                       e3b0_val.mac_1_class_t_guarantied);
-               /*
-                * The hysteresis on the guarantied buffer space for class #t
+               /* The hysteresis on the guarantied buffer space for class #t
                 * in MAC1.  t=0,1
                 */
                REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
@@ -2520,15 +2453,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                FEATURE_CONFIG_PFC_ENABLED;
        DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 
-       /*
-        * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+       /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
         * MAC control frames (that are not pause packets)
         * will be forwarded to the XCM.
         */
        xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
                          NIG_REG_LLH0_XCM_MASK);
-       /*
-        * nig params will override non PFC params, since it's possible to
+       /* NIG params will override non PFC params, since it's possible to
         * do transition from PFC to SAFC
         */
        if (set_pfc) {
@@ -2548,7 +2479,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                        llfc_out_en = nig_params->llfc_out_en;
                        llfc_enable = nig_params->llfc_enable;
                        pause_enable = nig_params->pause_enable;
-               } else  /*defaul non PFC mode - PAUSE */
+               } else  /* Default non PFC mode - PAUSE */
                        pause_enable = 1;
 
                xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
@@ -2608,8 +2539,7 @@ int bnx2x_update_pfc(struct link_params *params,
                      struct link_vars *vars,
                      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-       /*
-        * The PFC and pause are orthogonal to one another, meaning when
+       /* The PFC and pause are orthogonal to one another, meaning when
         * PFC is enabled, the pause are disabled, and when PFC is
         * disabled, pause are set according to the pause result.
         */
@@ -3148,7 +3078,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                              EMAC_MDIO_STATUS_10MB);
 
        /* address */
-
        tmp = ((phy->addr << 21) | (devad << 16) | reg |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
@@ -3337,8 +3266,7 @@ int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
                   u8 devad, u16 reg, u16 *ret_val)
 {
        u8 phy_index;
-       /*
-        * Probe for the phy according to the given phy_addr, and execute
+       /* Probe for the phy according to the given phy_addr, and execute
         * the read request on it
         */
        for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3355,8 +3283,7 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
                    u8 devad, u16 reg, u16 val)
 {
        u8 phy_index;
-       /*
-        * Probe for the phy according to the given phy_addr, and execute
+       /* Probe for the phy according to the given phy_addr, and execute
         * the write request on it
         */
        for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3382,7 +3309,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
        if (bnx2x_is_4_port_mode(bp)) {
                u32 port_swap, port_swap_ovr;
 
-               /*figure out path swap value */
+               /* Figure out path swap value */
                path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
                if (path_swap_ovr & 0x1)
                        path_swap = (path_swap_ovr & 0x2);
@@ -3392,7 +3319,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
                if (path_swap)
                        path = path ^ 1;
 
-               /*figure out port swap value */
+               /* Figure out port swap value */
                port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
                if (port_swap_ovr & 0x1)
                        port_swap = (port_swap_ovr & 0x2);
@@ -3405,7 +3332,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
                lane = (port<<1) + path;
        } else { /* two port mode - no port swap */
 
-               /*figure out path swap value */
+               /* Figure out path swap value */
                path_swap_ovr =
                        REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
                if (path_swap_ovr & 0x1) {
@@ -3437,8 +3364,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
 
        if (USES_WARPCORE(bp)) {
                aer_val = bnx2x_get_warpcore_lane(phy, params);
-               /*
-                * In Dual-lane mode, two lanes are joined together,
+               /* In Dual-lane mode, two lanes are joined together,
                 * so in order to configure them, the AER broadcast method is
                 * used here.
                 * 0x200 is the broadcast address for lanes 0,1
@@ -3518,8 +3444,7 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-       /**
-        * resolve pause mode and advertisement Please refer to Table
+       /* Resolve pause mode and advertisement. Please refer to Table
         * 28B-3 of the 802.3ab-1999 spec
         */
 
@@ -3642,6 +3567,7 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
                vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
        if (pause_result & (1<<1))
                vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+
 }
 
 static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
@@ -3698,6 +3624,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
        bnx2x_pause_resolve(vars, pause_result);
 
 }
+
 static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
                                   struct link_params *params,
                                   struct link_vars *vars)
@@ -3819,9 +3746,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
        /* Advertise pause */
        bnx2x_ext_phy_set_pause(params, phy, vars);
-
-       /*
-        * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+       /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
         */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                        MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
@@ -3829,7 +3754,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
                vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
        }
-
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                        MDIO_WC_REG_DIGITAL5_MISC7, &val16);
 
@@ -3903,7 +3827,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
                         MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
 
-       /*Enable encoded forced speed */
+       /* Enable encoded forced speed */
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
 
@@ -4265,8 +4189,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
                                PORT_HW_CFG_E3_MOD_ABS_MASK) >>
                                PORT_HW_CFG_E3_MOD_ABS_SHIFT;
 
-               /*
-                * Should not happen. This function called upon interrupt
+               /* Should not happen. This function is called upon interrupt
                 * triggered by GPIO ( since EPIO can only generate interrupts
                 * to MCP).
                 * So if this function was called and none of the GPIOs was set,
@@ -4366,7 +4289,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
                                        "link up, rx_tx_asic_rst 0x%x\n",
                                        vars->rx_tx_asic_rst);
                        } else {
-                               /*reset the lane to see if link comes up.*/
+                               /* Reset the lane to see if link comes up. */
                                bnx2x_warpcore_reset_lane(bp, phy, 1);
                                bnx2x_warpcore_reset_lane(bp, phy, 0);
 
@@ -4387,7 +4310,6 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
        } /*params->rx_tx_asic_rst*/
 
 }
-
 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                                       struct link_params *params,
                                       struct link_vars *vars)
@@ -4545,7 +4467,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
        /* Update those 1-copy registers */
        CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
                          MDIO_AER_BLOCK_AER_REG, 0);
-               /* Enable 1G MDIO (1-copy) */
+       /* Enable 1G MDIO (1-copy) */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                        MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
                        &val16);
@@ -4624,43 +4546,43 @@ void bnx2x_sync_link(struct link_params *params,
                vars->duplex = DUPLEX_FULL;
                switch (vars->link_status &
                        LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
-                       case LINK_10THD:
-                               vars->duplex = DUPLEX_HALF;
-                               /* fall thru */
-                       case LINK_10TFD:
-                               vars->line_speed = SPEED_10;
-                               break;
+               case LINK_10THD:
+                       vars->duplex = DUPLEX_HALF;
+                       /* Fall thru */
+               case LINK_10TFD:
+                       vars->line_speed = SPEED_10;
+                       break;
 
-                       case LINK_100TXHD:
-                               vars->duplex = DUPLEX_HALF;
-                               /* fall thru */
-                       case LINK_100T4:
-                       case LINK_100TXFD:
-                               vars->line_speed = SPEED_100;
-                               break;
+               case LINK_100TXHD:
+                       vars->duplex = DUPLEX_HALF;
+                       /* Fall thru */
+               case LINK_100T4:
+               case LINK_100TXFD:
+                       vars->line_speed = SPEED_100;
+                       break;
 
-                       case LINK_1000THD:
-                               vars->duplex = DUPLEX_HALF;
-                               /* fall thru */
-                       case LINK_1000TFD:
-                               vars->line_speed = SPEED_1000;
-                               break;
+               case LINK_1000THD:
+                       vars->duplex = DUPLEX_HALF;
+                       /* Fall thru */
+               case LINK_1000TFD:
+                       vars->line_speed = SPEED_1000;
+                       break;
 
-                       case LINK_2500THD:
-                               vars->duplex = DUPLEX_HALF;
-                               /* fall thru */
-                       case LINK_2500TFD:
-                               vars->line_speed = SPEED_2500;
-                               break;
+               case LINK_2500THD:
+                       vars->duplex = DUPLEX_HALF;
+                       /* Fall thru */
+               case LINK_2500TFD:
+                       vars->line_speed = SPEED_2500;
+                       break;
 
-                       case LINK_10GTFD:
-                               vars->line_speed = SPEED_10000;
-                               break;
-                       case LINK_20GTFD:
-                               vars->line_speed = SPEED_20000;
-                               break;
-                       default:
-                               break;
+               case LINK_10GTFD:
+                       vars->line_speed = SPEED_10000;
+                       break;
+               case LINK_20GTFD:
+                       vars->line_speed = SPEED_20000;
+                       break;
+               default:
+                       break;
                }
                vars->flow_ctrl = 0;
                if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
@@ -4835,9 +4757,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
                                 struct bnx2x_phy *phy)
 {
        struct bnx2x *bp = params->bp;
-       /*
-        *  Each two bits represents a lane number:
-        *  No swap is 0123 => 0x1b no need to enable the swap
+       /* Each two bits represents a lane number:
+        * No swap is 0123 => 0x1b no need to enable the swap
         */
        u16 rx_lane_swap, tx_lane_swap;
 
@@ -5051,8 +4972,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
                          MDIO_REG_BANK_COMBO_IEEE0,
                          MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
-       /*
-        * program speed
+       /* Program speed
         *  - needed only if the speed is greater than 1G (2.5G or 10G)
         */
        CL22_RD_OVER_CL45(bp, phy,
@@ -5087,8 +5007,6 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 val = 0;
 
-       /* configure the 48 bits for BAM AN */
-
        /* set extended capabilities */
        if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
                val |= MDIO_OVER_1G_UP1_2_5G;
@@ -5234,11 +5152,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
        }
 }
 
-
-/*
- * link management
+/* Link management
  */
-
 static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
                                             struct link_params *params)
 {
@@ -5383,8 +5298,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
                             "ustat_val(0x8371) = 0x%x\n", ustat_val);
                return;
        }
-       /*
-        * Step 3: Check CL37 Message Pages received to indicate LP
+       /* Step 3: Check CL37 Message Pages received to indicate LP
         * supports only CL37
         */
        CL22_RD_OVER_CL45(bp, phy,
@@ -5401,8 +5315,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
                         cl37_fsm_received);
                return;
        }
-       /*
-        * The combined cl37/cl73 fsm state information indicating that
+       /* The combined cl37/cl73 fsm state information indicating that
         * we are connected to a device which does not support cl73, but
         * does support cl37 BAM. In this case we disable cl73 and
         * restart cl37 auto-neg
@@ -5973,8 +5886,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 {
        u32 latch_status = 0;
 
-       /*
-        * Disable the MI INT ( external phy int ) by writing 1 to the
+       /* Disable the MI INT (external phy int) by writing 1 to the
         * status register. Link down indication is high-active-signal,
         * so in this case we need to write the status to clear the XOR
         */
@@ -6009,8 +5921,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
        u32 mask;
-       /*
-        * First reset all status we assume only one line will be
+       /* First reset all status; we assume only one line will
         * change at a time
         */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -6024,8 +5935,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
                        if (is_10g_plus)
                                mask = NIG_STATUS_XGXS0_LINK10G;
                        else if (params->switch_cfg == SWITCH_CFG_10G) {
-                               /*
-                                * Disable the link interrupt by writing 1 to
+                               /* Disable the link interrupt by writing 1 to
                                 * the relevant lane in the status register
                                 */
                                u32 ser_lane =
@@ -6227,8 +6137,7 @@ int bnx2x_set_led(struct link_params *params,
                break;
 
        case LED_MODE_OPER:
-               /*
-                * For all other phys, OPER mode is same as ON, so in case
+               /* For all other phys, OPER mode is same as ON, so in case
                 * link is down, do nothing
                 */
                if (!vars->link_up)
@@ -6239,9 +6148,7 @@ int bnx2x_set_led(struct link_params *params,
                         (params->phy[EXT_PHY1].type ==
                          PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
                    CHIP_IS_E2(bp) && params->num_phys == 2) {
-                       /*
-                        * This is a work-around for E2+8727 Configurations
-                        */
+                       /* This is a work-around for E2+8727 Configurations */
                        if (mode == LED_MODE_ON ||
                                speed == SPEED_10000){
                                REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -6250,8 +6157,7 @@ int bnx2x_set_led(struct link_params *params,
                                tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
                                EMAC_WR(bp, EMAC_REG_EMAC_LED,
                                        (tmp | EMAC_LED_OVERRIDE));
-                               /*
-                                * return here without enabling traffic
+                               /* Return here without enabling traffic
                                 * LED blink and setting rate in ON mode.
                                 * In oper mode, enabling LED blink
                                 * and setting rate is needed.
@@ -6260,8 +6166,7 @@ int bnx2x_set_led(struct link_params *params,
                                        return rc;
                        }
                } else if (SINGLE_MEDIA_DIRECT(params)) {
-                       /*
-                        * This is a work-around for HW issue found when link
+                       /* This is a work-around for HW issue found when link
                         * is up in CL73
                         */
                        if ((!CHIP_IS_E3(bp)) ||
@@ -6310,10 +6215,7 @@ int bnx2x_set_led(struct link_params *params,
                     (speed == SPEED_1000) ||
                     (speed == SPEED_100) ||
                     (speed == SPEED_10))) {
-                       /*
-                        * On Everest 1 Ax chip versions for speeds less than
-                        * 10G LED scheme is different
-                        */
+                       /* For speeds less than 10G LED scheme is different */
                        REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
                               + port*4, 1);
                        REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
@@ -6333,8 +6235,7 @@ int bnx2x_set_led(struct link_params *params,
 
 }
 
-/*
- * This function comes to reflect the actual link state read DIRECTLY from the
+/* This function reflects the actual link state read DIRECTLY from the
  * HW
  */
 int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
@@ -6422,16 +6323,14 @@ static int bnx2x_link_initialize(struct link_params *params,
        int rc = 0;
        u8 phy_index, non_ext_phy;
        struct bnx2x *bp = params->bp;
-       /*
-        * In case of external phy existence, the line speed would be the
+       /* In case of external phy existence, the line speed would be the
         * line speed linked up by the external phy. In case it is direct
         * only, then the line_speed during initialization will be
         * equal to the req_line_speed
         */
        vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-       /*
-        * Initialize the internal phy in case this is a direct board
+       /* Initialize the internal phy in case this is a direct board
         * (no external phys), or this board has external phy which requires
         * to first.
         */
@@ -6463,8 +6362,7 @@ static int bnx2x_link_initialize(struct link_params *params,
        } else {
                for (phy_index = EXT_PHY1; phy_index < params->num_phys;
                      phy_index++) {
-                       /*
-                        * No need to initialize second phy in case of first
+                       /* No need to initialize second phy in case of first
                         * phy only selection. In case of second phy, we do
                         * need to initialize the first phy, since they are
                         * connected.
@@ -6492,7 +6390,6 @@ static int bnx2x_link_initialize(struct link_params *params,
                        NIG_STATUS_XGXS0_LINK_STATUS |
                        NIG_STATUS_SERDES0_LINK_STATUS |
                        NIG_MASK_MI_INT));
-       bnx2x_update_mng(params, vars->link_status);
        return rc;
 }
 
@@ -6577,7 +6474,7 @@ static int bnx2x_update_link_up(struct link_params *params,
                                u8 link_10g)
 {
        struct bnx2x *bp = params->bp;
-       u8 port = params->port;
+       u8 phy_idx, port = params->port;
        int rc = 0;
 
        vars->link_status |= (LINK_STATUS_LINK_UP |
@@ -6641,11 +6538,18 @@ static int bnx2x_update_link_up(struct link_params *params,
 
        /* update shared memory */
        bnx2x_update_mng(params, vars->link_status);
+
+       /* Check remote fault */
+       for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+               if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+                       bnx2x_check_half_open_conn(params, vars, 0);
+                       break;
+               }
+       }
        msleep(20);
        return rc;
 }
-/*
- * The bnx2x_link_update function should be called upon link
+/* The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
  * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
@@ -6702,8 +6606,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        if (!CHIP_IS_E3(bp))
                REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-       /*
-        * Step 1:
+       /* Step 1:
         * Check external link change only for external phys, and apply
         * priority selection between them in case the link on both phys
         * is up. Note that instead of the common vars, a temporary
@@ -6734,23 +6637,20 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                        switch (bnx2x_phy_selection(params)) {
                        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
                        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-                       /*
-                        * In this option, the first PHY makes sure to pass the
+                       /* In this option, the first PHY makes sure to pass the
                         * traffic through itself only.
                         * Its not clear how to reset the link on the second phy
                         */
                                active_external_phy = EXT_PHY1;
                                break;
                        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-                       /*
-                        * In this option, the first PHY makes sure to pass the
+                       /* In this option, the first PHY makes sure to pass the
                         * traffic through the second PHY.
                         */
                                active_external_phy = EXT_PHY2;
                                break;
                        default:
-                       /*
-                        * Link indication on both PHYs with the following cases
+                       /* Link indication on both PHYs with the following cases
                         * is invalid:
                         * - FIRST_PHY means that second phy wasn't initialized,
                         * hence its link is expected to be down
@@ -6767,8 +6667,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                }
        }
        prev_line_speed = vars->line_speed;
-       /*
-        * Step 2:
+       /* Step 2:
         * Read the status of the internal phy. In case of
         * DIRECT_SINGLE_MEDIA board, this link is the external link,
         * otherwise this is the link between the 577xx and the first
@@ -6778,8 +6677,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                params->phy[INT_PHY].read_status(
                        &params->phy[INT_PHY],
                        params, vars);
-       /*
-        * The INT_PHY flow control reside in the vars. This include the
+       /* The INT_PHY flow control resides in the vars. This includes the
         * case where the speed or flow control are not set to AUTO.
         * Otherwise, the active external phy flow control result is set
         * to the vars. The ext_phy_line_speed is needed to check if the
@@ -6788,14 +6686,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
         */
        if (active_external_phy > INT_PHY) {
                vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-               /*
-                * Link speed is taken from the XGXS. AN and FC result from
+               /* Link speed is taken from the XGXS. AN and FC result from
                 * the external phy.
                 */
                vars->link_status |= phy_vars[active_external_phy].link_status;
 
-               /*
-                * if active_external_phy is first PHY and link is up - disable
+               /* If active_external_phy is first PHY and link is up,
                 * disable TX on second external PHY
                 */
                if (active_external_phy == EXT_PHY1) {
@@ -6832,8 +6728,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
                   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
                   vars->link_status, ext_phy_line_speed);
-       /*
-        * Upon link speed change set the NIG into drain mode. Comes to
+       /* Upon link speed change set the NIG into drain mode. Comes to
         * deals with possible FIFO glitch due to clk change when speed
         * is decreased without link down indicator
         */
@@ -6858,8 +6753,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 
        bnx2x_link_int_ack(params, vars, link_10g_plus);
 
-       /*
-        * In case external phy link is up, and internal link is down
+       /* In case external phy link is up, and internal link is down
         * (not initialized yet probably after link initialization, it
         * needs to be initialized.
         * Note that after link down-up as result of cable plug, the xgxs
@@ -6887,8 +6781,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                                                vars);
                }
        }
-       /*
-        * Link is up only if both local phy and external phy (in case of
+       /* Link is up only if both local phy and external phy (in case of
         * non-direct board) are up and no fault detected on active PHY.
         */
        vars->link_up = (vars->phy_link_up &&
@@ -7120,8 +7013,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
        }
        /* XAUI workaround in 8073 A0: */
 
-       /*
-        * After loading the boot ROM and restarting Autoneg, poll
+       /* After loading the boot ROM and restarting Autoneg, poll
         * Dev1, Reg $C820:
         */
 
@@ -7130,8 +7022,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
                                &val);
-                 /*
-                  * If bit [14] = 0 or bit [13] = 0, continue on with
+                 /* If bit [14] = 0 or bit [13] = 0, continue on with
                   * system initialization (XAUI work-around not required, as
                   * these bits indicate 2.5G or 1G link up).
                   */
@@ -7140,8 +7031,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
                        return 0;
                } else if (!(val & (1<<15))) {
                        DP(NETIF_MSG_LINK, "bit 15 went off\n");
-                       /*
-                        * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+                       /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
                         * MSB (bit15) goes to 1 (indicating that the XAUI
                         * workaround has completed), then continue on with
                         * system initialization.
@@ -7291,8 +7181,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
                        val = (1<<7);
                } else if (phy->req_line_speed ==  SPEED_2500) {
                        val = (1<<5);
-                       /*
-                        * Note that 2.5G works only when used with 1G
+                       /* Note that 2.5G works only when used with 1G
                         * advertisement
                         */
                } else
@@ -7343,8 +7232,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
        /* Add support for CL37 (passive mode) III */
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-       /*
-        * The SNR will improve about 2db by changing BW and FEE main
+       /* The SNR will improve about 2dB by changing BW and FFE main
         * tap. Rest commands are executed after link is up
         * Change FFE main cursor to 5 in EDC register
         */
@@ -7431,8 +7319,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 
        link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
        if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-               /*
-                * The SNR will improve about 2dbby changing the BW and FEE main
+               /* The SNR will improve about 2dB by changing the BW and FFE main
                 * tap. The 1st write to change FFE main tap is set before
                 * restart AN. Change PLL Bandwidth in EDC register
                 */
@@ -7479,8 +7366,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
                        bnx2x_cl45_read(bp, phy,
                                        MDIO_XS_DEVAD,
                                        MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
-                       /*
-                        * Set bit 3 to invert Rx in 1G mode and clear this bit
+                       /* Set bit 3 to invert Rx in 1G mode and clear this bit
                         * when it`s in 10G mode.
                         */
                        if (vars->line_speed == SPEED_1000) {
@@ -7602,8 +7488,7 @@ static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
                                           u8 pmd_dis)
 {
        struct bnx2x *bp = params->bp;
-       /*
-        * Disable transmitter only for bootcodes which can enable it afterwards
+       /* Disable transmitter only for bootcodes which can enable it afterwards
         * (for D3 link)
         */
        if (pmd_dis) {
@@ -7780,9 +7665,6 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        u32 data_array[4];
        u16 addr32;
        struct bnx2x *bp = params->bp;
-       /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
-                                       " addr %d, cnt %d\n",
-                                       addr, byte_cnt);*/
        if (byte_cnt > 16) {
                DP(NETIF_MSG_LINK,
                   "Reading from eeprom is limited to 16 bytes\n");
@@ -7847,8 +7729,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                         MDIO_PMA_DEVAD,
                         MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
                         0x8002);
-       /*
-        * Wait appropriate time for two-wire command to finish before
+       /* Wait appropriate time for two-wire command to finish before
         * polling the status register
         */
        msleep(1);
@@ -7941,8 +7822,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
        {
                u8 copper_module_type;
                phy->media_type = ETH_PHY_DA_TWINAX;
-               /*
-                * Check if its active cable (includes SFP+ module)
+               /* Check if it's an active cable (includes SFP+ module)
                 * of passive cable
                 */
                if (bnx2x_read_sfp_module_eeprom(phy,
@@ -8019,8 +7899,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
        return 0;
 }
-/*
- * This function read the relevant field from the module (SFP+), and verify it
+/* This function reads the relevant field from the module (SFP+), and verifies it
  * is compliant with this board
  */
 static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
@@ -8102,8 +7981,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
        u8 val;
        struct bnx2x *bp = params->bp;
        u16 timeout;
-       /*
-        * Initialization time after hot-plug may take up to 300ms for
+       /* Initialization time after hot-plug may take up to 300ms for
         * some phys type ( e.g. JDSU )
         */
 
@@ -8125,8 +8003,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
                                    u8 is_power_up) {
        /* Make sure GPIOs are not using for LED mode */
        u16 val;
-       /*
-        * In the GPIO register, bit 4 is use to determine if the GPIOs are
+       /* In the GPIO register, bit 4 is used to determine if the GPIOs are
         * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
         * output
         * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
@@ -8142,8 +8019,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
        if (is_power_up)
                val = (1<<4);
        else
-               /*
-                * Set GPIO control to OUTPUT, and set the power bit
+               /* Set GPIO control to OUTPUT, and set the power bit
                 * to according to the is_power_up
                 */
                val = (1<<1);
@@ -8177,8 +8053,7 @@ static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
 
                DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-               /*
-                * Changing to LRM mode takes quite few seconds. So do it only
+               /* Changing to LRM mode takes quite a few seconds, so do it only
                 * if current mode is limiting (default is LRM)
                 */
                if (cur_limiting_mode != EDC_MODE_LIMITING)
@@ -8313,8 +8188,7 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
        if (CHIP_IS_E3(bp)) {
-               /*
-                * Low ==> if SFP+ module is supported otherwise
+               /* Low ==> if SFP+ module is supported otherwise
                 * High ==> if SFP+ module is not on the approved vendor list
                 */
                bnx2x_set_e3_module_fault_led(params, gpio_mode);
@@ -8339,8 +8213,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
                return;
        DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
                       power, pin_cfg);
-       /*
-        * Low ==> corresponding SFP+ module is powered
+       /* Low ==> corresponding SFP+ module is powered
         * high ==> the SFP+ module is powered down
         */
        bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
@@ -8474,14 +8347,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
                bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
        }
 
-       /*
-        * Check and set limiting mode / LRM mode on 8726. On 8727 it
+       /* Check and set limiting mode / LRM mode on 8726. On 8727 it
         * is done automatically
         */
        bnx2x_set_limiting_mode(params, phy, edc_mode);
 
-       /*
-        * Enable transmit for this module if the module is approved, or
+       /* Enable transmit for this module if the module is approved, or
         * if unapproved modules should also enable the Tx laser
         */
        if (rc == 0 ||
@@ -8536,8 +8407,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
                bnx2x_set_gpio_int(bp, gpio_num,
                                   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
                                   gpio_port);
-               /*
-                * Module was plugged out.
+               /* Module was plugged out.
                 * Disable transmit for this module
                 */
                phy->media_type = ETH_PHY_NOT_PRESENT;
@@ -8607,8 +8477,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
                        " link_status 0x%x\n", rx_sd, pcs_status, val2);
-       /*
-        * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+       /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
         * are set, or if the autoneg bit 1 is set
         */
        link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
@@ -8722,8 +8591,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
        }
        bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
 
-       /*
-        * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+       /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
         * power mode, if TX Laser is disabled
         */
 
@@ -8833,8 +8701,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
 
        bnx2x_8726_external_rom_boot(phy, params);
 
-       /*
-        * Need to call module detected on initialization since the module
+       /* Need to call module detected on initialization since the module
         * detection triggered by actual module insertion might occur before
         * driver is loaded, and when driver is loaded, it reset all
         * registers, including the transmitter
@@ -8871,8 +8738,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
                bnx2x_cl45_write(bp, phy,
                                MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-               /*
-                * Enable RX-ALARM control to receive interrupt for 1G speed
+               /* Enable RX-ALARM control to receive interrupt for 1G speed
                 * change
                 */
                bnx2x_cl45_write(bp, phy,
@@ -8973,8 +8839,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params) {
        u32 swap_val, swap_override;
        u8 port;
-       /*
-        * The PHY reset is controlled by GPIO 1. Fake the port number
+       /* The PHY reset is controlled by GPIO 1. Fake the port number
         * to cancel the swap done in set_gpio()
         */
        struct bnx2x *bp = params->bp;
@@ -9012,14 +8877,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 
-       /*
-        * Initially configure MOD_ABS to interrupt when module is
+       /* Initially configure MOD_ABS to interrupt when module is
         * presence( bit 8)
         */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-       /*
-        * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+       /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
         * When the EDC is off it locks onto a reference clock and avoids
         * becoming 'lost'
         */
@@ -9040,8 +8903,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
        if (phy->flags & FLAGS_NOC)
                val |= (3<<5);
 
-       /*
-        * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+       /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
         * status which reflect SFP+ module over-current
         */
        if (!(phy->flags & FLAGS_NOC))
@@ -9067,8 +8929,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
                DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-               /*
-                * Power down the XAUI until link is up in case of dual-media
+               /* Power down the XAUI until link is up in case of dual-media
                 * and 1G
                 */
                if (DUAL_MEDIA(params)) {
@@ -9093,8 +8954,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
        } else {
-               /*
-                * Since the 8727 has only single reset pin, need to set the 10G
+               /* Since the 8727 has only single reset pin, need to set the 10G
                 * registers although it is default
                 */
                bnx2x_cl45_write(bp, phy,
@@ -9109,8 +8969,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 0x0008);
        }
 
-       /*
-        * Set 2-wire transfer rate of SFP+ module EEPROM
+       /* Set 2-wire transfer rate of SFP+ module EEPROM
         * to 100Khz since some DACs(direct attached cables) do
         * not work at 400Khz.
         */
@@ -9133,8 +8992,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 phy->tx_preemphasis[1]);
        }
 
-       /*
-        * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+       /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
         * power mode, if TX Laser is disabled
         */
        tx_en_mode = REG_RD(bp, params->shmem_base +
@@ -9180,8 +9038,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK,
                   "MOD_ABS indication show module is absent\n");
                phy->media_type = ETH_PHY_NOT_PRESENT;
-               /*
-                * 1. Set mod_abs to detect next module
+               /* 1. Set mod_abs to detect next module
                 *    presence event
                 * 2. Set EDC off by setting OPTXLOS signal input to low
                 *    (bit 9).
@@ -9195,8 +9052,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                                 MDIO_PMA_DEVAD,
                                 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-               /*
-                * Clear RX alarm since it stays up as long as
+               /* Clear RX alarm since it stays up as long as
                 * the mod_abs wasn't changed
                 */
                bnx2x_cl45_read(bp, phy,
@@ -9207,8 +9063,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                /* Module is present */
                DP(NETIF_MSG_LINK,
                   "MOD_ABS indication show module is present\n");
-               /*
-                * First disable transmitter, and if the module is ok, the
+               /* First disable transmitter, and if the module is ok, the
                 * module_detection will enable it
                 * 1. Set mod_abs to detect next module absent event ( bit 8)
                 * 2. Restore the default polarity of the OPRXLOS signal and
@@ -9222,8 +9077,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                                 MDIO_PMA_DEVAD,
                                 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-               /*
-                * Clear RX alarm since it stays up as long as the mod_abs
+               /* Clear RX alarm since it stays up as long as the mod_abs
                 * wasn't changed. This is need to be done before calling the
                 * module detection, otherwise it will clear* the link update
                 * alarm
@@ -9284,8 +9138,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-       /*
-        * If a module is present and there is need to check
+       /* If a module is present and there is need to check
         * for over current
         */
        if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
@@ -9350,8 +9203,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                        MDIO_PMA_DEVAD,
                        MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-       /*
-        * Bits 0..2 --> speed detected,
+       /* Bits 0..2 --> speed detected,
         * Bits 13..15--> link is down
         */
        if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
@@ -9394,8 +9246,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_8727_PCS_GP, &val1);
-               /*
-                * In case of dual-media board and 1G, power up the XAUI side,
+               /* In case of dual-media board and 1G, power up the XAUI side,
                 * otherwise power it down. For 10G it is done automatically
                 */
                if (link_up)
@@ -9561,8 +9412,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                /* Save spirom version */
                bnx2x_save_848xx_spirom_version(phy, bp, params->port);
        }
-       /*
-        * This phy uses the NIG latch mechanism since link indication
+       /* This phy uses the NIG latch mechanism since link indication
         * arrives through its LED4 and not via its LASI signal, so we
         * get steady signal instead of clear on read
         */
@@ -9667,8 +9517,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        if (phy->req_duplex == DUPLEX_FULL)
                autoneg_val |= (1<<8);
 
-       /*
-        * Always write this if this is not 84833.
+       /* Always write this if this is not 84833.
         * For 84833, write it only when it's a forced speed.
         */
        if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
@@ -9916,8 +9765,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        /* Wait for GPHY to come out of reset */
        msleep(50);
        if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-               /*
-                * BCM84823 requires that XGXS links up first @ 10G for normal
+               /* BCM84823 requires that XGXS links up first @ 10G for normal
                 * behavior.
                 */
                u16 temp;
@@ -10393,8 +10241,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                break;
        }
 
-       /*
-        * This is a workaround for E3+84833 until autoneg
+       /* This is a workaround for E3+84833 until autoneg
         * restart is fixed in f/w
         */
        if (CHIP_IS_E3(bp)) {
@@ -10418,8 +10265,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "54618SE cfg init\n");
        usleep_range(1000, 1000);
 
-       /*
-        * This works with E3 only, no need to check the chip
+       /* This works with E3 only, no need to check the chip
         * before determining the port.
         */
        port = params->port;
@@ -10441,7 +10287,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                         MDIO_PMA_REG_CTRL, 0x8000);
        bnx2x_wait_reset_complete(bp, phy, params);
 
-       /*wait for GPHY to reset */
+       /* Wait for GPHY to reset */
        msleep(50);
 
        /* Configure LED4: set to INTR (0x6). */
@@ -10647,13 +10493,11 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
        u32 cfg_pin;
        u8 port;
 
-       /*
-        * In case of no EPIO routed to reset the GPHY, put it
+       /* In case of no EPIO routed to reset the GPHY, put it
         * in low power mode.
         */
        bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
-       /*
-        * This works with E3 only, no need to check the chip
+       /* This works with E3 only, no need to check the chip
         * before determining the port.
         */
        port = params->port;
@@ -10762,7 +10606,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
                bnx2x_ext_phy_resolve_fc(phy, params, vars);
 
                if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
-                       /* report LP advertised speeds */
+                       /* Report LP advertised speeds */
                        bnx2x_cl22_read(bp, phy, 0x5, &val);
 
                        if (val & (1<<5))
@@ -10827,8 +10671,7 @@ static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
        /* This register opens the gate for the UMAC despite its name */
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 
-       /*
-        * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+       /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
         * length used by the MAC receive logic to check frames.
         */
        REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -11101,22 +10944,23 @@ static struct bnx2x_phy phy_warpcore = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = FLAGS_HW_LOCK_REQUIRED,
+       .flags          = (FLAGS_HW_LOCK_REQUIRED |
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
        .supported      = (SUPPORTED_10baseT_Half |
-                            SUPPORTED_10baseT_Full |
-                            SUPPORTED_100baseT_Half |
-                            SUPPORTED_100baseT_Full |
-                            SUPPORTED_1000baseT_Full |
-                            SUPPORTED_10000baseT_Full |
-                            SUPPORTED_20000baseKR2_Full |
-                            SUPPORTED_20000baseMLD2_Full |
-                            SUPPORTED_FIBRE |
-                            SUPPORTED_Autoneg |
-                            SUPPORTED_Pause |
-                            SUPPORTED_Asym_Pause),
+                          SUPPORTED_10baseT_Full |
+                          SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_20000baseKR2_Full |
+                          SUPPORTED_20000baseMLD2_Full |
+                          SUPPORTED_FIBRE |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
@@ -11258,7 +11102,8 @@ static struct bnx2x_phy phy_8726 = {
        .addr           = 0xff,
        .def_md_devad   = 0,
        .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_INIT_XGXS_FIRST),
+                          FLAGS_INIT_XGXS_FIRST |
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11289,7 +11134,8 @@ static struct bnx2x_phy phy_8727 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = FLAGS_FAN_FAILURE_DET_REQ,
+       .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11354,8 +11200,9 @@ static struct bnx2x_phy phy_84823 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
-                         FLAGS_REARM_LATCH_SIGNAL,
+       .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
+                          FLAGS_REARM_LATCH_SIGNAL |
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11390,8 +11237,9 @@ static struct bnx2x_phy phy_84833 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
-                           FLAGS_REARM_LATCH_SIGNAL,
+       .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
+                          FLAGS_REARM_LATCH_SIGNAL |
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11466,9 +11314,8 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
        /* Get the 4 lanes xgxs config rx and tx */
        u32 rx = 0, tx = 0, i;
        for (i = 0; i < 2; i++) {
-               /*
-                * INT_PHY and EXT_PHY1 share the same value location in the
-                * shmem. When num_phys is greater than 1, than this value
+               /* INT_PHY and EXT_PHY1 share the same value location in
+                * the shmem. When num_phys is greater than 1, this value
                 * applies only to EXT_PHY1
                 */
                if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
@@ -11546,8 +11393,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                                        offsetof(struct shmem_region, dev_info.
                                        port_hw_config[port].default_cfg)) &
                                 PORT_HW_CFG_NET_SERDES_IF_MASK);
-               /*
-                * Set the appropriate supported and flags indications per
+               /* Set the appropriate supported and flags indications per
                 * interface type of the chip
                 */
                switch (serdes_net_if) {
@@ -11605,8 +11451,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        break;
                }
 
-               /*
-                * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+               /* Enable MDC/MDIO work-around for E3 A0 since free running MDC
                 * was not set as expected. For B0, ECO will be enabled so there
                 * won't be an issue there
                 */
@@ -11719,8 +11564,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
        bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-       /*
-        * The shmem address of the phy version is located on different
+       /* The shmem address of the phy version is located on different
         * structures. In case this structure is too old, do not set
         * the address
         */
@@ -11754,8 +11598,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
 
        if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
            (phy->ver_addr)) {
-               /*
-                * Remove 100Mb link supported for BCM84833 when phy fw
+               /* Remove 100Mb link supported for BCM84833 when phy fw
                 * version lower than or equal to 1.39
                 */
                u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11765,8 +11608,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
                                            SUPPORTED_100baseT_Full);
        }
 
-       /*
-        * In case mdc/mdio_access of the external phy is different than the
+       /* In case mdc/mdio_access of the external phy is different than the
         * mdc/mdio access of the XGXS, a HW lock must be taken in each access
         * to prevent one port interfere with another port's CL45 operations.
         */
@@ -11936,13 +11778,16 @@ int bnx2x_phy_probe(struct link_params *params)
                if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
                        break;
 
+               if (params->feature_config_flags &
+                   FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
+                       phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+
                sync_offset = params->shmem_base +
                        offsetof(struct shmem_region,
                        dev_info.port_hw_config[params->port].media_type);
                media_types = REG_RD(bp, sync_offset);
 
-               /*
-                * Update media type for non-PMF sync only for the first time
+               /* Update media type for non-PMF sync only for the first time
                 * In case the media type changes afterwards, it will be updated
                 * using the update_status function
                 */
@@ -12016,8 +11861,7 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
        vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
        vars->mac_type = MAC_TYPE_XMAC;
        vars->phy_flags = PHY_XGXS_FLAG;
-       /*
-        * Set WC to loopback mode since link is required to provide clock
+       /* Set WC to loopback mode since link is required to provide clock
         * to the XMAC in 20G mode
         */
        bnx2x_set_aer_mmd(params, &params->phy[0]);
@@ -12162,6 +12006,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                bnx2x_link_int_enable(params);
                break;
        }
+       bnx2x_update_mng(params, vars->link_status);
        return 0;
 }
 
@@ -12302,7 +12147,8 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
                                NIG_MASK_MI_INT));
 
                /* Need to take the phy out of low power mode in order
-                       to write to access its registers */
+                * to write to access its registers
+                */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
                               MISC_REGISTERS_GPIO_OUTPUT_HIGH,
                               port);
@@ -12350,8 +12196,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
                                 (val | 1<<10));
        }
 
-       /*
-        * Toggle Transmitter: Power down and then up with 600ms delay
+       /* Toggle Transmitter: Power down and then up with 600ms delay
         * between
         */
        msleep(600);
@@ -12494,8 +12339,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
        reset_gpio = MISC_REGISTERS_GPIO_1;
        port = 1;
 
-       /*
-        * Retrieve the reset gpio/port which control the reset.
+       /* Retrieve the reset gpio/port which control the reset.
         * Default is GPIO1, PORT1
         */
        bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
@@ -12670,8 +12514,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
 
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-               /*
-                * GPIO1 affects both ports, so there's need to pull
+               /* GPIO1 affects both ports, so there's need to pull
                 * it for single port alone
                 */
                rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
@@ -12679,8 +12522,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                                                phy_index, chip_id);
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
-               /*
-                * GPIO3's are linked, and so both need to be toggled
+               /* GPIO3's are linked, and so both need to be toggled
                 * to obtain required 2us pulse.
                 */
                rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
@@ -12779,7 +12621,8 @@ static void bnx2x_check_over_curr(struct link_params *params,
 }
 
 static void bnx2x_analyze_link_error(struct link_params *params,
-                                    struct link_vars *vars, u32 lss_status)
+                                    struct link_vars *vars, u32 lss_status,
+                                    u8 notify)
 {
        struct bnx2x *bp = params->bp;
        /* Compare new value with previous value */
@@ -12793,8 +12636,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
        DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
                       half_open_conn, lss_status);
 
-       /*
-        * a. Update shmem->link_status accordingly
+       /* a. Update shmem->link_status accordingly
         * b. Update link_vars->link_up
         */
        if (lss_status) {
@@ -12802,8 +12644,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
                vars->link_status &= ~LINK_STATUS_LINK_UP;
                vars->link_up = 0;
                vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
-               /*
-                * Set LED mode to off since the PHY doesn't know about these
+
+               /* activate nig drain */
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+               /* Set LED mode to off since the PHY doesn't know about these
                 * errors
                 */
                led_mode = LED_MODE_OFF;
@@ -12813,7 +12657,11 @@ static void bnx2x_analyze_link_error(struct link_params *params,
                vars->link_up = 1;
                vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
                led_mode = LED_MODE_OPER;
+
+               /* Clear nig drain */
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
        }
+       bnx2x_sync_link(params, vars);
        /* Update the LED according to the link state */
        bnx2x_set_led(params, vars, led_mode, SPEED_10000);
 
@@ -12822,7 +12670,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
 
        /* C. Trigger General Attention */
        vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
-       bnx2x_notify_link_changed(bp);
+       if (notify)
+               bnx2x_notify_link_changed(bp);
 }
 
 /******************************************************************************
@@ -12834,22 +12683,23 @@ static void bnx2x_analyze_link_error(struct link_params *params,
 *      a fault, for example, due to break in the TX side of fiber.
 *
 ******************************************************************************/
-static void bnx2x_check_half_open_conn(struct link_params *params,
-                                      struct link_vars *vars)
+int bnx2x_check_half_open_conn(struct link_params *params,
+                               struct link_vars *vars,
+                               u8 notify)
 {
        struct bnx2x *bp = params->bp;
        u32 lss_status = 0;
        u32 mac_base;
        /* In case link status is physically up @ 10G do */
-       if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
-               return;
+       if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
+           (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
+               return 0;
 
        if (CHIP_IS_E3(bp) &&
            (REG_RD(bp, MISC_REG_RESET_REG_2) &
              (MISC_REGISTERS_RESET_REG_2_XMAC))) {
                /* Check E3 XMAC */
-               /*
-                * Note that link speed cannot be queried here, since it may be
+               /* Note that link speed cannot be queried here, since it may be
                 * zero while link is down. In case UMAC is active, LSS will
                 * simply not be set
                 */
@@ -12863,7 +12713,7 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
                if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
                        lss_status = 1;
 
-               bnx2x_analyze_link_error(params, vars, lss_status);
+               bnx2x_analyze_link_error(params, vars, lss_status, notify);
        } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
                   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
                /* Check E1X / E2 BMAC */
@@ -12880,18 +12730,21 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
                REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
                lss_status = (wb_data[0] > 0);
 
-               bnx2x_analyze_link_error(params, vars, lss_status);
+               bnx2x_analyze_link_error(params, vars, lss_status, notify);
        }
+       return 0;
 }
 
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 {
-       struct bnx2x *bp = params->bp;
        u16 phy_idx;
+       struct bnx2x *bp = params->bp;
        for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
                if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
                        bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
-                       bnx2x_check_half_open_conn(params, vars);
+                       if (bnx2x_check_half_open_conn(params, vars, 1) !=
+                           0)
+                               DP(NETIF_MSG_LINK, "Fault detection failed\n");
                        break;
                }
        }
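
The hunks above rework bnx2x's remote-fault handling: bnx2x_analyze_link_error() acts only when the cached half-open state actually changes, drains the NIG egress queue while the fault persists, and notifies the OS only when the new `notify` argument asks for it. A small standalone sketch of that transition-only bookkeeping, with simplified placeholder types in place of link_params/link_vars and a printf() standing in for the real register writes and notification:

/*
 * Simplified placeholder types; the driver works on link_params/link_vars
 * and writes NIG_REG_EGRESS_DRAIN0_MODE instead of the fields used here.
 */
#include <stdbool.h>
#include <stdio.h>

struct link_state {
    bool half_open;       /* cached PHY_HALF_OPEN_CONN_FLAG */
    bool link_up;         /* mirrors vars->link_up */
    bool egress_drained;  /* mirrors the NIG egress drain register */
};

static void analyze_link_error(struct link_state *s, bool lss, bool notify)
{
    if (s->half_open == lss)      /* no transition -> nothing to do */
        return;

    s->half_open = lss;
    s->link_up = !lss;
    s->egress_drained = lss;      /* drain TX while the fault lasts */

    if (notify)                   /* caller decides whether the OS is told */
        printf("link changed: %s\n", lss ? "half-open" : "recovered");
}

int main(void)
{
    struct link_state s = { .link_up = true };

    analyze_link_error(&s, true, true);    /* fault detected, notify */
    analyze_link_error(&s, true, true);    /* same state, ignored */
    analyze_link_error(&s, false, false);  /* fault cleared, no notify */
    return 0;
}
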
index 763535ee4832e3882a3ee62a6654eeac7d328ae6..00f26d319ba4b8b41288e8cf0e42edad5ec9152c 100644
@@ -256,6 +256,7 @@ struct link_params {
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY       (1<<3)
 #define FEATURE_CONFIG_AUTOGREEEN_ENABLED                      (1<<9)
 #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED             (1<<10)
+#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET                (1<<11)
        /* Will be populated during common init */
        struct bnx2x_phy phy[MAX_PHYS];
 
@@ -495,4 +496,6 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
 
+int bnx2x_check_half_open_conn(struct link_params *params,
+                              struct link_vars *vars, u8 notify);
 #endif /* BNX2X_LINK_H */
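
FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (bit 11) lets board configuration turn the new fault detection off: bnx2x_phy_probe() clears FLAGS_TX_ERROR_CHECK on the PHY when the bit is set, so bnx2x_period_func() never polls it. A minimal sketch of that gate, assuming an illustrative bit position for FLAGS_TX_ERROR_CHECK (its real value lives elsewhere in the driver headers):

#include <stdint.h>
#include <stdio.h>

#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1u << 11)
#define FLAGS_TX_ERROR_CHECK                    (1u << 0)   /* placeholder bit */

int main(void)
{
    uint32_t feature_config_flags = FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET;
    uint32_t phy_flags = FLAGS_TX_ERROR_CHECK;

    /* same test bnx2x_phy_probe() applies per external PHY */
    if (feature_config_flags & FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
        phy_flags &= ~FLAGS_TX_ERROR_CHECK;    /* skip half-open polling */

    printf("tx error check %s\n",
           (phy_flags & FLAGS_TX_ERROR_CHECK) ? "enabled" : "disabled");
    return 0;
}
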
index e077d25087273721c0994b1c44dd5a41cceab882..1da25d796995d3b3dcb2ef153c94bb4cd131df69 100644
@@ -39,7 +39,6 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -93,15 +92,11 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
-static int multi_mode = 1;
-module_param(multi_mode, int, 0);
-MODULE_PARM_DESC(multi_mode, " Multi queue mode "
-                            "(0 Disable; 1 Enable (default))");
 
 int num_queues;
 module_param(num_queues, int, 0);
-MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
-                               " (default is as a number of CPUs)");
+MODULE_PARM_DESC(num_queues,
+                " Set number of queues (default is as a number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -141,7 +136,9 @@ enum bnx2x_board_type {
        BCM57810,
        BCM57810_MF,
        BCM57840,
-       BCM57840_MF
+       BCM57840_MF,
+       BCM57811,
+       BCM57811_MF
 };
 
 /* indexed by board_type, above */
@@ -158,8 +155,9 @@ static struct {
        { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
        { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
        { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-       { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
-                                               "Ethernet Multi Function"}
+       { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
+       { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
+       { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -195,6 +193,12 @@ static struct {
 #ifndef PCI_DEVICE_ID_NX2_57840_MF
 #define PCI_DEVICE_ID_NX2_57840_MF     CHIP_NUM_57840_MF
 #endif
+#ifndef PCI_DEVICE_ID_NX2_57811
+#define PCI_DEVICE_ID_NX2_57811                CHIP_NUM_57811
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_MF
+#define PCI_DEVICE_ID_NX2_57811_MF     CHIP_NUM_57811_MF
+#endif
 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -207,6 +211,8 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+       { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { 0 }
 };
 
@@ -309,67 +315,6 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 #define DMAE_DP_DST_PCI                "pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE       "dst_addr [none]"
 
-static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
-                         int msglvl)
-{
-       u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
-
-       switch (dmae->opcode & DMAE_COMMAND_DST) {
-       case DMAE_CMD_DST_PCI:
-               if (src_type == DMAE_CMD_SRC_PCI)
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-                          dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               else
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%08x], len [%d*4], dst [%x:%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_lo >> 2,
-                          dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               break;
-       case DMAE_CMD_DST_GRC:
-               if (src_type == DMAE_CMD_SRC_PCI)
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-                          dmae->len, dmae->dst_addr_lo >> 2,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               else
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src [%08x], len [%d*4], dst [%08x]\n"
-                          "comp_addr [%x:%08x], comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_lo >> 2,
-                          dmae->len, dmae->dst_addr_lo >> 2,
-                          dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               break;
-       default:
-               if (src_type == DMAE_CMD_SRC_PCI)
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
-                          "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-                          dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               else
-                       DP(msglvl, "DMAE: opcode 0x%08x\n"
-                          "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
-                          "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-                          dmae->opcode, dmae->src_addr_lo >> 2,
-                          dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-                          dmae->comp_val);
-               break;
-       }
-
-}
 
 /* copy command into DMAE command memory and set DMAE command go */
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -506,8 +451,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
 
-       bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -540,8 +483,6 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
 
-       bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -562,27 +503,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 }
 
-/* used only for slowpath so not inlined */
-static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
-{
-       u32 wb_write[2];
-
-       wb_write[0] = val_hi;
-       wb_write[1] = val_lo;
-       REG_WR_DMAE(bp, reg, wb_write, 2);
-}
-
-#ifdef USE_WB_RD
-static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
-{
-       u32 wb_data[2];
-
-       REG_RD_DMAE(bp, reg, wb_data, 2);
-
-       return HILO_U64(wb_data[0], wb_data[1]);
-}
-#endif
-
 static int bnx2x_mc_assert(struct bnx2x *bp)
 {
        char last_idx;
@@ -1425,8 +1345,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
 static void bnx2x_igu_int_enable(struct bnx2x *bp)
 {
        u32 val;
-       int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-       int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+       bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+       bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+       bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
 
        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 
@@ -1436,6 +1357,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
+
+               if (single_msix)
+                       val |= IGU_PF_CONF_SINGLE_ISR_EN;
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
@@ -1455,6 +1379,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
 
        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 
+       if (val & IGU_PF_CONF_INT_LINE_EN)
+               pci_intx(bp->pdev, true);
+
        barrier();
 
        /* init leading/trailing edge */
@@ -2229,40 +2156,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
        return rc;
 }
 
-static void bnx2x_init_port_minmax(struct bnx2x *bp)
-{
-       u32 r_param = bp->link_vars.line_speed / 8;
-       u32 fair_periodic_timeout_usec;
-       u32 t_fair;
-
-       memset(&(bp->cmng.rs_vars), 0,
-              sizeof(struct rate_shaping_vars_per_port));
-       memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
-
-       /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-       bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
-
-       /* this is the threshold below which no timer arming will occur
-          1.25 coefficient is for the threshold to be a little bigger
-          than the real time, to compensate for timer in-accuracy */
-       bp->cmng.rs_vars.rs_threshold =
-                               (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
-
-       /* resolution of fairness timer */
-       fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-       /* for 10G it is 1000usec. for 1G it is 10000usec. */
-       t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
-
-       /* this is the threshold below which we won't arm the timer anymore */
-       bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
-
-       /* we multiply by 1e3/8 to get bytes/msec.
-          We don't want the credits to pass a credit
-          of the t_fair*FAIR_MEM (algorithm resolution) */
-       bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
-       /* since each tick is 4 usec */
-       bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
-}
 
 /* Calculates the sum of vn_min_rates.
    It's needed for further normalizing of the min_rates.
@@ -2273,12 +2166,12 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
      In the later case fainess algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will be set to 1.
  */
-static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+static void bnx2x_calc_vn_min(struct bnx2x *bp,
+                                     struct cmng_init_input *input)
 {
        int all_zero = 1;
        int vn;
 
-       bp->vn_weight_sum = 0;
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                u32 vn_cfg = bp->mf_config[vn];
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2286,106 +2179,56 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 
                /* Skip hidden vns */
                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
-                       continue;
-
+                       vn_min_rate = 0;
                /* If min rate is zero - set it to 1 */
-               if (!vn_min_rate)
+               else if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                else
                        all_zero = 0;
 
-               bp->vn_weight_sum += vn_min_rate;
+               input->vnic_min_rate[vn] = vn_min_rate;
        }
 
        /* if ETS or all min rates are zeros - disable fairness */
        if (BNX2X_IS_ETS_ENABLED(bp)) {
-               bp->cmng.flags.cmng_enables &=
+               input->flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
                DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
        } else if (all_zero) {
-               bp->cmng.flags.cmng_enables &=
+               input->flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-               DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-                  "  fairness will be disabled\n");
+               DP(NETIF_MSG_IFUP,
+                  "All MIN values are zeroes fairness will be disabled\n");
        } else
-               bp->cmng.flags.cmng_enables |=
+               input->flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
+                                   struct cmng_init_input *input)
 {
-       struct rate_shaping_vars_per_vn m_rs_vn;
-       struct fairness_vars_per_vn m_fair_vn;
+       u16 vn_max_rate;
        u32 vn_cfg = bp->mf_config[vn];
-       int func = func_by_vn(bp, vn);
-       u16 vn_min_rate, vn_max_rate;
-       int i;
 
-       /* If function is hidden - set min and max to zeroes */
-       if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
-               vn_min_rate = 0;
+       if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
                vn_max_rate = 0;
-
-       } else {
+       else {
                u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-               vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-                               FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-               /* If fairness is enabled (not all min rates are zeroes) and
-                  if current min rate is zero - set it to 1.
-                  This is a requirement of the algorithm. */
-               if (bp->vn_weight_sum && (vn_min_rate == 0))
-                       vn_min_rate = DEF_MIN_RATE;
-
-               if (IS_MF_SI(bp))
+               if (IS_MF_SI(bp)) {
                        /* maxCfg in percents of linkspeed */
                        vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
-               else
+               } else /* SD modes */
                        /* maxCfg is absolute in 100Mb units */
                        vn_max_rate = maxCfg * 100;
        }
 
-       DP(NETIF_MSG_IFUP,
-          "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
-          func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
-
-       memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
-       memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
-
-       /* global vn counter - maximal Mbps for this vn */
-       m_rs_vn.vn_counter.rate = vn_max_rate;
-
-       /* quota - number of bytes transmitted in this period */
-       m_rs_vn.vn_counter.quota =
-                               (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
-
-       if (bp->vn_weight_sum) {
-               /* credit for each period of the fairness algorithm:
-                  number of bytes in T_FAIR (the vn share the port rate).
-                  vn_weight_sum should not be larger than 10000, thus
-                  T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
-                  than zero */
-               m_fair_vn.vn_credit_delta =
-                       max_t(u32, (vn_min_rate * (T_FAIR_COEF /
-                                                  (8 * bp->vn_weight_sum))),
-                             (bp->cmng.fair_vars.fair_threshold +
-                                                       MIN_ABOVE_THRESH));
-               DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
-                  m_fair_vn.vn_credit_delta);
-       }
-
-       /* Store it to internal memory */
-       for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
-               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                      XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
-                      ((u32 *)(&m_rs_vn))[i]);
-
-       for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
-               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                      XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
-                      ((u32 *)(&m_fair_vn))[i]);
+       DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
+
+       input->vnic_max_rate[vn] = vn_max_rate;
 }
 
+
 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 {
        if (CHIP_REV_IS_SLOW(bp))
@@ -2427,34 +2270,31 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
 
 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 {
+       struct cmng_init_input input;
+       memset(&input, 0, sizeof(struct cmng_init_input));
+
+       input.port_rate = bp->link_vars.line_speed;
 
        if (cmng_type == CMNG_FNS_MINMAX) {
                int vn;
 
-               /* clear cmng_enables */
-               bp->cmng.flags.cmng_enables = 0;
-
                /* read mf conf from shmem */
                if (read_cfg)
                        bnx2x_read_mf_cfg(bp);
 
-               /* Init rate shaping and fairness contexts */
-               bnx2x_init_port_minmax(bp);
-
                /* vn_weight_sum and enable fairness if not 0 */
-               bnx2x_calc_vn_weight_sum(bp);
+               bnx2x_calc_vn_min(bp, &input);
 
                /* calculate and set min-max rate for each vn */
                if (bp->port.pmf)
                        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
-                               bnx2x_init_vn_minmax(bp, vn);
+                               bnx2x_calc_vn_max(bp, vn, &input);
 
                /* always enable rate shaping and fairness */
-               bp->cmng.flags.cmng_enables |=
+               input.flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-               if (!bp->vn_weight_sum)
-                       DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-                                  "  fairness will be disabled\n");
+
+               bnx2x_init_cmng(&input, &bp->cmng);
                return;
        }
 
@@ -6640,13 +6480,16 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 {
        int reg;
+       u32 wb_write[2];
 
        if (CHIP_IS_E1(bp))
                reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
        else
                reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
 
-       bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+       wb_write[0] = ONCHIP_ADDR1(addr);
+       wb_write[1] = ONCHIP_ADDR2(addr);
+       REG_WR_DMAE(bp, reg, wb_write, 2);
 }
 
 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
@@ -7230,7 +7073,7 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
                BNX2X_DEV_INFO("set number of queues to 1\n");
                break;
        default:
-               /* Set number of queues according to bp->multi_mode value */
+               /* Set number of queues for MSI-X mode */
                bnx2x_set_num_queues(bp);
 
                BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
@@ -7239,15 +7082,17 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
                 * so try to enable MSI-X with the requested number of fp's
                 * and fallback to MSI or legacy INTx with one fp
                 */
-               if (bnx2x_enable_msix(bp)) {
-                       /* failed to enable MSI-X */
-                       BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n",
+               if (bnx2x_enable_msix(bp) ||
+                   bp->flags & USING_SINGLE_MSIX_FLAG) {
+                       /* failed to enable multiple MSI-X */
+                       BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
                                       bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
 
                        bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 
                        /* Try to enable MSI */
-                       if (!(bp->flags & DISABLE_MSI_FLAG))
+                       if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
+                           !(bp->flags & DISABLE_MSI_FLAG))
                                bnx2x_enable_msi(bp);
                }
                break;
@@ -9201,6 +9046,17 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        id |= (val & 0xf);
        bp->common.chip_id = id;
 
+       /* force 57811 according to MISC register */
+       if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+               if (CHIP_IS_57810(bp))
+                       bp->common.chip_id = (CHIP_NUM_57811 << 16) |
+                               (bp->common.chip_id & 0x0000FFFF);
+               else if (CHIP_IS_57810_MF(bp))
+                       bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
+                               (bp->common.chip_id & 0x0000FFFF);
+               bp->common.chip_id |= 0x1;
+       }
+
        /* Set doorbell size */
        bp->db_size = (1 << BNX2X_DB_SHIFT);
 
@@ -10384,8 +10240,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        if (BP_NOMCP(bp) && (func == 0))
                dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
 
-       bp->multi_mode = multi_mode;
-
        bp->disable_tpa = disable_tpa;
 
 #ifdef BCM_CNIC
@@ -11325,6 +11179,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        case BCM57810_MF:
        case BCM57840:
        case BCM57840_MF:
+       case BCM57811:
+       case BCM57811_MF:
                max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
                break;
 
index c25803b9c0ca3e3e91ecb69cc53f92124e0101c7..bbd387492a80c41202da85d1d7ed73419f38e510 100644
    starts at 0x0 for the A0 tape-out and increments by one for each
    all-layer tape-out. */
 #define MISC_REG_CHIP_REV                                       0xa40c
+/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
+ * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
+ * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
+#define MISC_REG_CHIP_TYPE                                      0xac60
+#define MISC_REG_CHIP_TYPE_57811_MASK                           (1<<1)
 /* [RW 32] The following driver registers(1...16) represent 16 drivers and
    32 clients. Each client can be controlled by one driver only. One in each
    bit represent that this driver control the appropriate client (Ex: bit 5
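
MISC_REG_CHIP_TYPE bit 1 is what bnx2x_get_common_hwinfo() (earlier in this diff) tests to re-label a 57810 part as a 57811: the high 16 bits of chip_id are replaced with the 57811 chip number, the low 16 bits are kept, and bit 0 is set. A sketch of that rewrite; CHIP_NUM_57811 below is a placeholder value, not the real device id:

#include <stdint.h>
#include <stdio.h>

#define MISC_REG_CHIP_TYPE_57811_MASK (1u << 1)
#define CHIP_NUM_57811                0x163d    /* placeholder, see bnx2x.h */

static uint32_t force_57811_id(uint32_t chip_id, uint32_t chip_type)
{
    if (chip_type & MISC_REG_CHIP_TYPE_57811_MASK) {
        chip_id = (CHIP_NUM_57811 << 16) | (chip_id & 0x0000FFFF);
        chip_id |= 0x1;                          /* bump the revision bit */
    }
    return chip_id;
}

int main(void)
{
    printf("0x%08x\n",
           force_57811_id(0x168e1000, MISC_REG_CHIP_TYPE_57811_MASK));
    return 0;
}
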
index 5135733216252e2e05767515d1f0893c9715047d..553b9877339e0cf77d3cfe52fbdbe46505437994 100644
@@ -4090,12 +4090,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
                rss_mode = ETH_RSS_MODE_DISABLED;
        else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
                rss_mode = ETH_RSS_MODE_REGULAR;
-       else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
-               rss_mode = ETH_RSS_MODE_VLAN_PRI;
-       else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
-               rss_mode = ETH_RSS_MODE_E1HOV_PRI;
-       else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
-               rss_mode = ETH_RSS_MODE_IP_DSCP;
 
        data->rss_mode = rss_mode;
 
index 61a7670adfcd743486720eb9f6aa755e8926ee3c..dee2f372a974a3820318ed45c099a9b69388a0d3 100644
@@ -685,9 +685,6 @@ enum {
        /* RSS_MODE bits are mutually exclusive */
        BNX2X_RSS_MODE_DISABLED,
        BNX2X_RSS_MODE_REGULAR,
-       BNX2X_RSS_MODE_VLAN_PRI,
-       BNX2X_RSS_MODE_E1HOV_PRI,
-       BNX2X_RSS_MODE_IP_DSCP,
 
        BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
 
index 062ac333fde60be0732572f39e0a7a46335da499..0c3e7c70ffbc72d614774553c35a61df084e9045 100644
@@ -12234,6 +12234,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh_indir         = tg3_get_rxfh_indir,
        .set_rxfh_indir         = tg3_set_rxfh_indir,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
index 77977d735dd7167dc8fe1f2690c87912ce1ecf0e..0b640fafbda359d20a43cac67ae561e7451f5a9b 100644
@@ -70,7 +70,6 @@ static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
 static void bfa_ioc_recover(struct bfa_ioc *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
@@ -346,8 +345,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                del_timer(&ioc->ioc_timer);
-               bfa_ioc_check_attr_wwns(ioc);
-               bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;
 
@@ -380,6 +377,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
 {
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+       bfa_ioc_hb_monitor(ioc);
 }
 
 static void
@@ -1207,27 +1205,62 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
        writel(1, sem_reg);
 }
 
+/* Clear fwver hdr */
+static void
+bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
+{
+       u32 pgnum, pgoff, loff = 0;
+       int i;
+
+       pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+       pgoff = PSS_SMEM_PGOFF(loff);
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+       for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
+               writel(0, ioc->ioc_regs.smem_page_start + loff);
+               loff += sizeof(u32);
+       }
+}
+
+
 static void
 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
 {
        struct bfi_ioc_image_hdr fwhdr;
-       u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+       u32 fwstate, r32;
 
-       if (fwstate == BFI_IOC_UNINIT)
+       /* Spin on init semaphore to serialize. */
+       r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
+       while (r32 & 0x1) {
+               udelay(20);
+               r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
+       }
+
+       fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+       if (fwstate == BFI_IOC_UNINIT) {
+               writel(1, ioc->ioc_regs.ioc_init_sem_reg);
                return;
+       }
 
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 
-       if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+       if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+               writel(1, ioc->ioc_regs.ioc_init_sem_reg);
                return;
+       }
 
+       bfa_ioc_fwver_clear(ioc);
        writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+       writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
 
        /*
         * Try to lock and then unlock the semaphore.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        writel(1, ioc->ioc_regs.ioc_sem_reg);
+
+       /* Unlock init semaphore */
+       writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 }
 
 static void
@@ -1585,11 +1618,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
        u32 i;
        u32 asicmode;
 
-       /**
-        * Initialize LMEM first before code download
-        */
-       bfa_ioc_lmem_init(ioc);
-
        fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1914,6 +1942,10 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
        bfa_ioc_pll_init_asic(ioc);
 
        ioc->pllinit = true;
+
+       /* Initialize LMEM */
+       bfa_ioc_lmem_init(ioc);
+
        /*
         *  release semaphore.
         */
@@ -2513,13 +2545,6 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
-{
-       if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
-               return;
-}
-
 /**
  * @dg hal_iocpf_pvt BFA IOC PF private functions
  * @{
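
bfa_ioc_hw_sem_init() above now serialises firmware-state handling on an init semaphore: spin while reads of ioc_init_sem_reg return 1 (with udelay(20) between polls), do the uninit/fwver-clear work, then write 1 to release, mirroring the readl()-to-lock / writel(1)-to-unlock use of ioc_sem_reg shown in the same hunk. A standalone sketch of that pattern, with a plain word plus two helpers standing in for the memory-mapped read-to-lock register:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t init_sem;                /* 0 = free, 1 = held */

static uint32_t sem_read_to_lock(volatile uint32_t *sem)
{
    uint32_t old = *sem;                          /* readl() on hardware */
    *sem = 1;                                     /* read side-effect: lock taken */
    return old;
}

static void sem_release(volatile uint32_t *sem)
{
    *sem = 0;                                     /* writel(1, sem) on hardware */
}

int main(void)
{
    /* Spin on the init semaphore to serialise fw-state handling. */
    while (sem_read_to_lock(&init_sem) & 0x1)
        ;                                         /* driver: udelay(20) here */

    puts("init semaphore held: safe to clear fwver header / fw state");

    sem_release(&init_sem);
    return 0;
}
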
index 348479bbfa3a42b953a111bb06c31cec5e4917a8..b6b036a143ae981b53aa055cedb23860728ac3fa 100644
@@ -199,9 +199,9 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
  * Host to LPU mailbox message addresses
  */
 static const struct {
-       u32     hfn_mbox;
-       u32     lpu_mbox;
-       u32     hfn_pgn;
+       u32     hfn_mbox;
+       u32     lpu_mbox;
+       u32     hfn_pgn;
 } ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
@@ -803,17 +803,72 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
 }
 
 #define CT2_NFC_MAX_DELAY       1000
+#define CT2_NFC_VER_VALID       0x143
+#define BFA_IOC_PLL_POLL        1000000
+
+static bool
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+       volatile u32 r32;
+
+       r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+       if (r32 & __NFC_CONTROLLER_HALTED)
+               return true;
+
+       return false;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+       volatile u32 r32;
+       int i;
+
+       writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+       for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+               r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+               if (!(r32 & __NFC_CONTROLLER_HALTED))
+                       return;
+               udelay(1000);
+       }
+       BUG_ON(1);
+}
+
 static enum bfa_status
 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
        volatile u32 wgn, r32;
-       int i;
+       u32 nfc_ver, i;
 
-       /*
-        * Initialize PLL if not already done by NFC
-        */
        wgn = readl(rb + CT2_WGN_STATUS);
-       if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+
+       nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+       if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+               (nfc_ver >= CT2_NFC_VER_VALID)) {
+               if (bfa_ioc_ct2_nfc_halted(rb))
+                       bfa_ioc_ct2_nfc_resume(rb);
+               writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+                               rb + CT2_CSI_FW_CTL_SET_REG);
+
+               for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+                       r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+                       if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+                               break;
+               }
+               BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+               for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+                       r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+                       if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+                               break;
+               }
+               BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+               udelay(1000);
+
+               r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+               BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+       } else {
                writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -821,53 +876,48 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
                                break;
                        udelay(1000);
                }
+
+               bfa_ioc_ct2_mac_reset(rb);
+               bfa_ioc_ct2_sclk_init(rb);
+               bfa_ioc_ct2_lclk_init(rb);
+
+               /* release soft reset on s_clk & l_clk */
+               r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+               writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                               rb + CT2_APP_PLL_SCLK_CTL_REG);
+               r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+               writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                               rb + CT2_APP_PLL_LCLK_CTL_REG);
+       }
+
+       /* Announce flash device presence, if flash was corrupted. */
+       if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+               r32 = readl((rb + PSS_GPIO_OUT_REG));
+               writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
+               r32 = readl((rb + PSS_GPIO_OE_REG));
+               writel(r32 | 1, rb + PSS_GPIO_OE_REG);
        }
 
        /*
         * Mask the interrupts and clear any
         * pending interrupts left by BIOS/EFI
         */
-
        writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
        writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
 
-       r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-       if (r32 == 1) {
-               writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
-               readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-       }
-       r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-       if (r32 == 1) {
-               writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
-               readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-       }
-
-       bfa_ioc_ct2_mac_reset(rb);
-       bfa_ioc_ct2_sclk_init(rb);
-       bfa_ioc_ct2_lclk_init(rb);
-
-       /*
-        * release soft reset on s_clk & l_clk
-        */
-       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-       writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
-                       (rb + CT2_APP_PLL_SCLK_CTL_REG));
-
-       /*
-        * release soft reset on s_clk & l_clk
-        */
-       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-       writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-                     (rb + CT2_APP_PLL_LCLK_CTL_REG));
-
-       /*
-        * Announce flash device presence, if flash was corrupted.
-        */
-       if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-               r32 = readl((rb + PSS_GPIO_OUT_REG));
-               writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
-               r32 = readl((rb + PSS_GPIO_OE_REG));
-               writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
+       /* For first time initialization, no need to clear interrupts */
+       r32 = readl(rb + HOST_SEM5_REG);
+       if (r32 & 0x1) {
+               r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+               if (r32 == 1) {
+                       writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+                       readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+               }
+               r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+               if (r32 == 1) {
+                       writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+                       readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+               }
        }
 
        bfa_ioc_ct2_mem_init(rb);
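
The new CT2 PLL bring-up path polls hardware with a hard iteration budget and treats a timeout as fatal: read CT2_APP_PLL_LCLK_CTL_REG up to BFA_IOC_PLL_POLL times and BUG_ON() if __RESET_AND_START_SCLK_LCLK_PLLS never shows up. A compact sketch of that bounded-polling idiom, with a counter-driven stub in place of readl():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BFA_IOC_PLL_POLL                 1000000
#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000

static uint32_t fake_pll_reg(uint32_t iteration)
{
    /* pretend the PLL start bit latches after a few reads */
    return (iteration >= 3) ? __RESET_AND_START_SCLK_LCLK_PLLS : 0;
}

int main(void)
{
    uint32_t r32 = 0;
    uint32_t i;

    for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
        r32 = fake_pll_reg(i);
        if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
            break;
    }
    assert(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);  /* BUG_ON() in the driver */

    printf("PLL start bit observed after %u polls\n", i + 1);
    return 0;
}
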
index efacff3ab51d5799f9cb392d971bddc590e0bf47..0e094fe46dfd1ab3c8c3d3088e3b550a9dd39336 100644
@@ -339,10 +339,16 @@ enum {
 #define __A2T_AHB_LOAD                 0x00000800
 #define __WGN_READY                    0x00000400
 #define __GLBL_PF_VF_CFG_RDY           0x00000200
+#define CT2_NFC_CSR_CLR_REG             0x00027420
 #define CT2_NFC_CSR_SET_REG            0x00027424
 #define __HALT_NFC_CONTROLLER          0x00000002
 #define __NFC_CONTROLLER_HALTED                0x00001000
 
+#define CT2_RSC_GPR15_REG              0x0002765c
+#define CT2_CSI_FW_CTL_REG              0x00027080
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
+#define CT2_CSI_FW_CTL_SET_REG          0x00027088
+
 #define CT2_CSI_MAC0_CONTROL_REG       0x000270d0
 #define __CSI_MAC_RESET                        0x00000010
 #define __CSI_MAC_AHB_RESET            0x00000008
index ff78f770dec91e12e9a816c413b768d31481acd4..25c4e7f2a099f18eb4e27cc2b9a2ee34578281ee 100644
@@ -80,8 +80,6 @@ do {                                                          \
        (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
 } while (0)
 
-#define BNAD_TXRX_SYNC_MDELAY  250     /* 250 msecs */
-
 static void
 bnad_add_to_list(struct bnad *bnad)
 {
@@ -103,7 +101,7 @@ bnad_remove_from_list(struct bnad *bnad)
  * Reinitialize completions in CQ, once Rx is taken down
  */
 static void
-bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
 {
        struct bna_cq_entry *cmpl, *next_cmpl;
        unsigned int wi_range, wis = 0, ccb_prod = 0;
@@ -141,7 +139,8 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
 
        for (j = 0; j < frag; j++) {
                dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
-                         skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
+                         skb_frag_size(&skb_shinfo(skb)->frags[j]),
+                                               DMA_TO_DEVICE);
                dma_unmap_addr_set(&array[index], dma_addr, 0);
                BNA_QE_INDX_ADD(index, 1, depth);
        }
@@ -155,7 +154,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
  * so DMA unmap & freeing is fine.
  */
 static void
-bnad_free_all_txbufs(struct bnad *bnad,
+bnad_txq_cleanup(struct bnad *bnad,
                 struct bna_tcb *tcb)
 {
        u32             unmap_cons;
@@ -183,13 +182,12 @@ bnad_free_all_txbufs(struct bnad *bnad,
 /* Data Path Handlers */
 
 /*
- * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
  * Can be called in a) Interrupt context
  *                 b) Sending context
- *                 c) Tasklet context
  */
 static u32
-bnad_free_txbufs(struct bnad *bnad,
+bnad_txcmpl_process(struct bnad *bnad,
                 struct bna_tcb *tcb)
 {
        u32             unmap_cons, sent_packets = 0, sent_bytes = 0;
@@ -198,13 +196,7 @@ bnad_free_txbufs(struct bnad *bnad,
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff          *skb;
 
-       /*
-        * Just return if TX is stopped. This check is useful
-        * when bnad_free_txbufs() runs out of a tasklet scheduled
-        * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
-        * but this routine runs actually after the cleanup has been
-        * executed.
-        */
+       /* Just return if TX is stopped */
        if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                return 0;
 
@@ -243,57 +235,8 @@ bnad_free_txbufs(struct bnad *bnad,
        return sent_packets;
 }
 
-/* Tx Free Tasklet function */
-/* Frees for all the tcb's in all the Tx's */
-/*
- * Scheduled from sending context, so that
- * the fat Tx lock is not held for too long
- * in the sending context.
- */
-static void
-bnad_tx_free_tasklet(unsigned long bnad_ptr)
-{
-       struct bnad *bnad = (struct bnad *)bnad_ptr;
-       struct bna_tcb *tcb;
-       u32             acked = 0;
-       int                     i, j;
-
-       for (i = 0; i < bnad->num_tx; i++) {
-               for (j = 0; j < bnad->num_txq_per_tx; j++) {
-                       tcb = bnad->tx_info[i].tcb[j];
-                       if (!tcb)
-                               continue;
-                       if (((u16) (*tcb->hw_consumer_index) !=
-                               tcb->consumer_index) &&
-                               (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
-                                                 &tcb->flags))) {
-                               acked = bnad_free_txbufs(bnad, tcb);
-                               if (likely(test_bit(BNAD_TXQ_TX_STARTED,
-                                       &tcb->flags)))
-                                       bna_ib_ack(tcb->i_dbell, acked);
-                               smp_mb__before_clear_bit();
-                               clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-                       }
-                       if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
-                                               &tcb->flags)))
-                               continue;
-                       if (netif_queue_stopped(bnad->netdev)) {
-                               if (acked && netif_carrier_ok(bnad->netdev) &&
-                                       BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
-                                               BNAD_NETIF_WAKE_THRESHOLD) {
-                                       netif_wake_queue(bnad->netdev);
-                                       /* TODO */
-                                       /* Counters for individual TxQs? */
-                                       BNAD_UPDATE_CTR(bnad,
-                                               netif_queue_wakeup);
-                               }
-                       }
-               }
-       }
-}
-
 static u32
-bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 {
        struct net_device *netdev = bnad->netdev;
        u32 sent = 0;
@@ -301,7 +244,7 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
        if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
                return 0;
 
-       sent = bnad_free_txbufs(bnad, tcb);
+       sent = bnad_txcmpl_process(bnad, tcb);
        if (sent) {
                if (netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev) &&
@@ -330,13 +273,13 @@ bnad_msix_tx(int irq, void *data)
        struct bna_tcb *tcb = (struct bna_tcb *)data;
        struct bnad *bnad = tcb->bnad;
 
-       bnad_tx(bnad, tcb);
+       bnad_tx_complete(bnad, tcb);
 
        return IRQ_HANDLED;
 }
 
 static void
-bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 {
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
@@ -348,7 +291,7 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
-bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 {
        struct bnad_unmap_q *unmap_q;
        struct bnad_skb_unmap *unmap_array;
@@ -369,11 +312,11 @@ bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
                                 DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
        }
-       bnad_reset_rcb(bnad, rcb);
+       bnad_rcb_cleanup(bnad, rcb);
 }
 
 static void
-bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 {
        u16 to_alloc, alloced, unmap_prod, wi_range;
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;
@@ -434,14 +377,14 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
        if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
                if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
                         >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-                       bnad_alloc_n_post_rxbufs(bnad, rcb);
+                       bnad_rxq_post(bnad, rcb);
                smp_mb__before_clear_bit();
                clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
        }
 }
 
 static u32
-bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
        struct bna_cq_entry *cmpl, *next_cmpl;
        struct bna_rcb *rcb = NULL;
@@ -453,12 +396,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
        struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 
-       set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
-
-       if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
-               clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+       if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
                return 0;
-       }
 
        prefetch(bnad->netdev);
        BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +472,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 
                if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        napi_gro_receive(&rx_ctrl->napi, skb);
-               else {
+               else
                        netif_receive_skb(skb);
-               }
 
 next:
                cmpl->valid = 0;
@@ -646,7 +584,7 @@ bnad_isr(int irq, void *data)
                for (j = 0; j < bnad->num_txq_per_tx; j++) {
                        tcb = bnad->tx_info[i].tcb[j];
                        if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-                               bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+                               bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
                }
        }
        /* Rx processing */
@@ -839,20 +777,9 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
 {
        struct bnad_tx_info *tx_info =
                        (struct bnad_tx_info *)tcb->txq->tx->priv;
-       struct bnad_unmap_q *unmap_q = tcb->unmap_q;
-
-       while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-               cpu_relax();
-
-       bnad_free_all_txbufs(bnad, tcb);
-
-       unmap_q->producer_index = 0;
-       unmap_q->consumer_index = 0;
-
-       smp_mb__before_clear_bit();
-       clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
        tx_info->tcb[tcb->id] = NULL;
+       tcb->priv = NULL;
 }
 
 static void
@@ -865,12 +792,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
        unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
 }
 
-static void
-bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
-{
-       bnad_free_all_rxbufs(bnad, rcb);
-}
-
 static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
@@ -916,7 +837,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 {
        struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
        struct bna_tcb *tcb;
-       struct bnad_unmap_q *unmap_q;
        u32 txq_id;
        int i;
 
@@ -926,23 +846,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
                        continue;
                txq_id = tcb->id;
 
-               unmap_q = tcb->unmap_q;
-
-               if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-                       continue;
-
-               while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-                       cpu_relax();
-
-               bnad_free_all_txbufs(bnad, tcb);
-
-               unmap_q->producer_index = 0;
-               unmap_q->consumer_index = 0;
-
-               smp_mb__before_clear_bit();
-               clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-
+               BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
                set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+               BUG_ON(*(tcb->hw_consumer_index) != 0);
 
                if (netif_carrier_ok(bnad->netdev)) {
                        printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +869,54 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
        }
 }
 
+/*
+ * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
+ */
+static void
+bnad_tx_cleanup(struct delayed_work *work)
+{
+       struct bnad_tx_info *tx_info =
+               container_of(work, struct bnad_tx_info, tx_cleanup_work);
+       struct bnad *bnad = NULL;
+       struct bnad_unmap_q *unmap_q;
+       struct bna_tcb *tcb;
+       unsigned long flags;
+       uint32_t i, pending = 0;
+
+       for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+               tcb = tx_info->tcb[i];
+               if (!tcb)
+                       continue;
+
+               bnad = tcb->bnad;
+
+               if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+                       pending++;
+                       continue;
+               }
+
+               bnad_txq_cleanup(bnad, tcb);
+
+               unmap_q = tcb->unmap_q;
+               unmap_q->producer_index = 0;
+               unmap_q->consumer_index = 0;
+
+               smp_mb__before_clear_bit();
+               clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+       }
+
+       if (pending) {
+               queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
+                       msecs_to_jiffies(1));
+               return;
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bna_tx_cleanup_complete(tx_info->tx);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+
 static void
 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 {
@@ -976,8 +930,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
                        continue;
        }
 
-       mdelay(BNAD_TXRX_SYNC_MDELAY);
-       bna_tx_cleanup_complete(tx);
+       queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
 }
 
 static void
@@ -1001,6 +954,44 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
        }
 }
 
+/*
+ * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
+ */
+static void
+bnad_rx_cleanup(void *work)
+{
+       struct bnad_rx_info *rx_info =
+               container_of(work, struct bnad_rx_info, rx_cleanup_work);
+       struct bnad_rx_ctrl *rx_ctrl;
+       struct bnad *bnad = NULL;
+       unsigned long flags;
+       uint32_t i;
+
+       for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+               rx_ctrl = &rx_info->rx_ctrl[i];
+
+               if (!rx_ctrl->ccb)
+                       continue;
+
+               bnad = rx_ctrl->ccb->bnad;
+
+               /*
+                * Wait till the poll handler has exited
+                * and nothing can be scheduled anymore
+                */
+               napi_disable(&rx_ctrl->napi);
+
+               bnad_cq_cleanup(bnad, rx_ctrl->ccb);
+               bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
+               if (rx_ctrl->ccb->rcb[1])
+                       bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bna_rx_cleanup_complete(rx_info->rx);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 {
@@ -1009,8 +1000,6 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
        struct bnad_rx_ctrl *rx_ctrl;
        int i;
 
-       mdelay(BNAD_TXRX_SYNC_MDELAY);
-
        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
                rx_ctrl = &rx_info->rx_ctrl[i];
                ccb = rx_ctrl->ccb;
@@ -1021,12 +1010,9 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 
                if (ccb->rcb[1])
                        clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-
-               while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
-                       cpu_relax();
        }
 
-       bna_rx_cleanup_complete(rx);
+       queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
 }
 
 static void
@@ -1046,13 +1032,12 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
                if (!ccb)
                        continue;
 
-               bnad_cq_cmpl_init(bnad, ccb);
+               napi_enable(&rx_ctrl->napi);
 
                for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
                        rcb = ccb->rcb[j];
                        if (!rcb)
                                continue;
-                       bnad_free_all_rxbufs(bnad, rcb);
 
                        set_bit(BNAD_RXQ_STARTED, &rcb->flags);
                        set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1063,7 +1048,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
                        if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
                                if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
                                        >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-                                       bnad_alloc_n_post_rxbufs(bnad, rcb);
+                                       bnad_rxq_post(bnad, rcb);
                                        smp_mb__before_clear_bit();
                                clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
                        }
@@ -1687,7 +1672,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
        if (!netif_carrier_ok(bnad->netdev))
                goto poll_exit;
 
-       rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+       rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
        if (rcvd >= budget)
                return rcvd;
 
@@ -1704,7 +1689,7 @@ poll_exit:
 
 #define BNAD_NAPI_POLL_QUOTA           64
 static void
-bnad_napi_init(struct bnad *bnad, u32 rx_id)
+bnad_napi_add(struct bnad *bnad, u32 rx_id)
 {
        struct bnad_rx_ctrl *rx_ctrl;
        int i;
@@ -1718,34 +1703,18 @@ bnad_napi_init(struct bnad *bnad, u32 rx_id)
 }
 
 static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
-{
-       struct bnad_rx_ctrl *rx_ctrl;
-       int i;
-
-       /* Initialize & enable NAPI */
-       for (i = 0; i < bnad->num_rxp_per_rx; i++) {
-               rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
-               napi_enable(&rx_ctrl->napi);
-       }
-}
-
-static void
-bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+bnad_napi_delete(struct bnad *bnad, u32 rx_id)
 {
        int i;
 
        /* First disable and then clean up */
-       for (i = 0; i < bnad->num_rxp_per_rx; i++) {
-               napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+       for (i = 0; i < bnad->num_rxp_per_rx; i++)
                netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
-       }
 }
 
 /* Should be held with conf_lock held */
 void
-bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
+bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
 {
        struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
        struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1764,9 +1733,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
                bnad_tx_msix_unregister(bnad, tx_info,
                        bnad->num_txq_per_tx);
 
-       if (0 == tx_id)
-               tasklet_kill(&bnad->tx_free_tasklet);
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_tx_destroy(tx_info->tx);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1832,6 +1798,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
                goto err_return;
        tx_info->tx = tx;
 
+       INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
+                       (work_func_t)bnad_tx_cleanup);
+
        /* Register ISR for the Tx object */
        if (intr_info->intr_type == BNA_INTR_T_MSIX) {
                err = bnad_tx_msix_register(bnad, tx_info,
@@ -1896,7 +1865,7 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
 void
-bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
+bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
        struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
        struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1928,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
        if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
                bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
 
-       bnad_napi_disable(bnad, rx_id);
+       bnad_napi_delete(bnad, rx_id);
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1921,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
        struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
        static const struct bna_rx_event_cbfn rx_cbfn = {
                .rcb_setup_cbfn = bnad_cb_rcb_setup,
-               .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+               .rcb_destroy_cbfn = NULL,
                .ccb_setup_cbfn = bnad_cb_ccb_setup,
                .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
                .rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +1967,14 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
        rx_info->rx = rx;
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+       INIT_WORK(&rx_info->rx_cleanup_work,
+                       (work_func_t)(bnad_rx_cleanup));
+
        /*
         * Init NAPI, so that state is set to NAPI_STATE_SCHED,
         * so that IRQ handler cannot schedule NAPI at this point.
         */
-       bnad_napi_init(bnad, rx_id);
+       bnad_napi_add(bnad, rx_id);
 
        /* Register ISR for the Rx object */
        if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,13 +2000,10 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
        bna_rx_enable(rx);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-       /* Enable scheduling of NAPI */
-       bnad_napi_enable(bnad, rx_id);
-
        return 0;
 
 err_return:
-       bnad_cleanup_rx(bnad, rx_id);
+       bnad_destroy_rx(bnad, rx_id);
        return err;
 }
 
@@ -2519,7 +2488,7 @@ bnad_open(struct net_device *netdev)
        return 0;
 
 cleanup_tx:
-       bnad_cleanup_tx(bnad, 0);
+       bnad_destroy_tx(bnad, 0);
 
 err_return:
        mutex_unlock(&bnad->conf_mutex);
@@ -2546,8 +2515,8 @@ bnad_stop(struct net_device *netdev)
 
        wait_for_completion(&bnad->bnad_completions.enet_comp);
 
-       bnad_cleanup_tx(bnad, 0);
-       bnad_cleanup_rx(bnad, 0);
+       bnad_destroy_tx(bnad, 0);
+       bnad_destroy_rx(bnad, 0);
 
        /* Synchronize mailbox IRQ */
        bnad_mbox_irq_sync(bnad);
@@ -2620,7 +2589,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                if ((u16) (*tcb->hw_consumer_index) !=
                    tcb->consumer_index &&
                    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
-                       acked = bnad_free_txbufs(bnad, tcb);
+                       acked = bnad_txcmpl_process(bnad, tcb);
                        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
                                bna_ib_ack(tcb->i_dbell, acked);
                        smp_mb__before_clear_bit();
@@ -2843,9 +2812,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        bna_txq_prod_indx_doorbell(tcb);
        smp_mb();
 
-       if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
-               tasklet_schedule(&bnad->tx_free_tasklet);
-
        return NETDEV_TX_OK;
 }
 
@@ -3127,8 +3093,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
 /*
  * 1. Initialize the bnad structure
  * 2. Setup netdev pointer in pci_dev
- * 3. Initialze Tx free tasklet
- * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ * 3. Initialize no. of TxQ & CQs & MSIX vectors
+ * 4. Initialize work queue.
  */
 static int
 bnad_init(struct bnad *bnad,
@@ -3171,8 +3137,11 @@ bnad_init(struct bnad *bnad,
        bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
        bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
 
-       tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
-                    (unsigned long)bnad);
+       sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
+       bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
+
+       if (!bnad->work_q)
+               return -ENOMEM;
 
        return 0;
 }
@@ -3185,6 +3154,12 @@ bnad_init(struct bnad *bnad,
 static void
 bnad_uninit(struct bnad *bnad)
 {
+       if (bnad->work_q) {
+               flush_workqueue(bnad->work_q);
+               destroy_workqueue(bnad->work_q);
+               bnad->work_q = NULL;
+       }
+
        if (bnad->bar0)
                iounmap(bnad->bar0);
        pci_set_drvdata(bnad->pcidev, NULL);
@@ -3304,7 +3279,6 @@ bnad_pci_probe(struct pci_dev *pdev,
        /*
         * Initialize bnad structure
         * Setup relation between pci_dev & netdev
-        * Init Tx free tasklet
         */
        err = bnad_init(bnad, pdev, netdev);
        if (err)
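
The bnad.c hunks above replace the Tx free tasklet and the mdelay()-based Tx/Rx sync with a per-device single-threaded workqueue whose cleanup work re-queues itself while the hot path still owns a queue. A minimal, self-contained sketch of that pattern follows; my_dev, my_cleanup and my_dev_create are hypothetical names, not the driver's:

/*
 * Deferred cleanup via a driver workqueue instead of a tasklet plus mdelay().
 * Bit 0 of md->flags stands in for BNAD_TXQ_FREE_SENT: whoever sets it owns
 * the buffer-freeing path, and the work item retries later instead of
 * spinning when it cannot take ownership.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *work_q;	/* like bnad->work_q */
	struct delayed_work cleanup_work;
	unsigned long flags;
};

static void my_cleanup(struct work_struct *work)
{
	struct my_dev *md = container_of(to_delayed_work(work),
					 struct my_dev, cleanup_work);

	if (test_and_set_bit(0, &md->flags)) {
		/* Hot path is freeing buffers right now: retry in ~1 ms. */
		queue_delayed_work(md->work_q, &md->cleanup_work,
				   msecs_to_jiffies(1));
		return;
	}

	/* ... unmap and free queued buffers, reset producer/consumer ... */

	clear_bit(0, &md->flags);
}

static struct my_dev *my_dev_create(void)
{
	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return NULL;

	md->work_q = create_singlethread_workqueue("my_dev_wq");
	if (!md->work_q) {
		kfree(md);
		return NULL;
	}
	INIT_DELAYED_WORK(&md->cleanup_work, my_cleanup);
	return md;
}

Scheduling queue_delayed_work(md->work_q, &md->cleanup_work, 0) from the stop path then takes the place of the old mdelay(BNAD_TXRX_SYNC_MDELAY) busy wait.
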
index 55824d92699f43cb390c860ce3295f9275f858f6..72742be112777dc01ee388aeab4b2c5be5668b66 100644 (file)
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
 #define BNAD_NAME                      "bna"
 #define BNAD_NAME_LEN                  64
 
-#define BNAD_VERSION                   "3.0.2.2"
+#define BNAD_VERSION                   "3.0.23.0"
 
 #define BNAD_MAILBOX_MSIX_INDEX                0
 #define BNAD_MAILBOX_MSIX_VECTORS      1
@@ -210,6 +210,7 @@ struct bnad_tx_info {
        struct bna_tx *tx; /* 1:1 between tx_info & tx */
        struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
        u32 tx_id;
+       struct delayed_work tx_cleanup_work;
 } ____cacheline_aligned;
 
 struct bnad_rx_info {
@@ -217,6 +218,7 @@ struct bnad_rx_info {
 
        struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
        u32 rx_id;
+       struct work_struct rx_cleanup_work;
 } ____cacheline_aligned;
 
 /* Unmap queues for Tx / Rx cleanup */
@@ -318,7 +320,7 @@ struct bnad {
        /* Burnt in MAC address */
        mac_t                   perm_addr;
 
-       struct tasklet_struct   tx_free_tasklet;
+       struct workqueue_struct *work_q;
 
        /* Statistics */
        struct bnad_stats stats;
@@ -328,6 +330,7 @@ struct bnad {
        char                    adapter_name[BNAD_NAME_LEN];
        char                    port_name[BNAD_NAME_LEN];
        char                    mbox_irq_name[BNAD_NAME_LEN];
+       char                    wq_name[BNAD_NAME_LEN];
 
        /* debugfs specific data */
        char    *regdata;
@@ -370,8 +373,8 @@ extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
 
 extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
 extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
+extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
 extern void bnad_dim_timer_start(struct bnad *bnad);
index ab753d7334a62bb61616276a34a1aa51654f93b7..40e1e84f498473f6a0c53b433acddfae1e4a012b 100644 (file)
@@ -464,7 +464,7 @@ bnad_set_ringparam(struct net_device *netdev,
                for (i = 0; i < bnad->num_rx; i++) {
                        if (!bnad->rx_info[i].rx)
                                continue;
-                       bnad_cleanup_rx(bnad, i);
+                       bnad_destroy_rx(bnad, i);
                        current_err = bnad_setup_rx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
@@ -492,7 +492,7 @@ bnad_set_ringparam(struct net_device *netdev,
                for (i = 0; i < bnad->num_tx; i++) {
                        if (!bnad->tx_info[i].tx)
                                continue;
-                       bnad_cleanup_tx(bnad, i);
+                       bnad_destroy_tx(bnad, i);
                        current_err = bnad_setup_tx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
@@ -539,7 +539,7 @@ bnad_set_pauseparam(struct net_device *netdev,
 }
 
 static void
-bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
 {
        struct bnad *bnad = netdev_priv(netdev);
        int i, j, q_num;
index c4834c23be35bb7715ab4698897007267cf34f6d..1466bc4e3dda8df7edf13ef522e242113d5962b4 100644 (file)
@@ -1213,6 +1213,7 @@ static const struct ethtool_ops macb_ethtool_ops = {
        .set_settings           = macb_set_settings,
        .get_drvinfo            = macb_get_drvinfo,
        .get_link               = ethtool_op_get_link,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
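
The one-line macb change above hooks the generic software-timestamping report into the driver's ethtool_ops. In sketch form (my_ethtool_ops is a placeholder name):

#include <linux/ethtool.h>

static const struct ethtool_ops my_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	/* Reports software Tx/Rx timestamping support and no PHC. */
	.get_ts_info	= ethtool_op_get_ts_info,
};
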
index 77b4e873f91c04fa8f6d3d3853c63513c1ea2af0..d7ac6c17547c9ad8f1cacc1db6de7f1e32264bb4 100644 (file)
@@ -1193,18 +1193,16 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
        if (err)
                return err;
 
-       NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
-       NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
-       if (pp->set & ENIC_SET_NAME)
-               NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
-                       pp->name);
-       if (pp->set & ENIC_SET_INSTANCE)
-               NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
-                       pp->instance_uuid);
-       if (pp->set & ENIC_SET_HOST)
-               NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
-                       pp->host_uuid);
-
+       if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
+           nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
+           ((pp->set & ENIC_SET_NAME) &&
+            nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
+           ((pp->set & ENIC_SET_INSTANCE) &&
+            nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+                    pp->instance_uuid)) ||
+           ((pp->set & ENIC_SET_HOST) &&
+            nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
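
The enic hunk above is part of the tree-wide removal of the NLA_PUT*() macros: each nla_put*() helper returns non-zero once the skb runs out of tailroom, so the calls are chained with || and share a single failure label. A small sketch of the pattern with made-up attribute names (MY_ATTR_*, my_fill_info):

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_ID,
	MY_ATTR_NAME,
	__MY_ATTR_MAX,
};

static int my_fill_info(struct sk_buff *skb, u16 id, const char *name)
{
	/* Any helper failing means the message no longer fits. */
	if (nla_put_u16(skb, MY_ATTR_ID, id) ||
	    (name && nla_put_string(skb, MY_ATTR_NAME, name)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
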
index 68f1c39184df6581ac84f4c249f27a6999caa07c..61cc0934286582844279338f42d7b80fec942b81 100644 (file)
@@ -1380,6 +1380,7 @@ static void de_free_rings (struct de_private *de)
 static int de_open (struct net_device *dev)
 {
        struct de_private *de = netdev_priv(dev);
+       const int irq = de->pdev->irq;
        int rc;
 
        netif_dbg(de, ifup, dev, "enabling interface\n");
@@ -1394,10 +1395,9 @@ static int de_open (struct net_device *dev)
 
        dw32(IntrMask, 0);
 
-       rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
+       rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc) {
-               netdev_err(dev, "IRQ %d request failure, err=%d\n",
-                          dev->irq, rc);
+               netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
                goto err_out_free;
        }
 
@@ -1413,7 +1413,7 @@ static int de_open (struct net_device *dev)
        return 0;
 
 err_out_free_irq:
-       free_irq(dev->irq, dev);
+       free_irq(irq, dev);
 err_out_free:
        de_free_rings(de);
        return rc;
@@ -1434,7 +1434,7 @@ static int de_close (struct net_device *dev)
        netif_carrier_off(dev);
        spin_unlock_irqrestore(&de->lock, flags);
 
-       free_irq(dev->irq, dev);
+       free_irq(de->pdev->irq, dev);
 
        de_free_rings(de);
        de_adapter_sleep(de);
@@ -1444,6 +1444,7 @@ static int de_close (struct net_device *dev)
 static void de_tx_timeout (struct net_device *dev)
 {
        struct de_private *de = netdev_priv(dev);
+       const int irq = de->pdev->irq;
 
        netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
                   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1451,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev)
 
        del_timer_sync(&de->media_timer);
 
-       disable_irq(dev->irq);
+       disable_irq(irq);
        spin_lock_irq(&de->lock);
 
        de_stop_hw(de);
@@ -1459,12 +1460,12 @@ static void de_tx_timeout (struct net_device *dev)
        netif_carrier_off(dev);
 
        spin_unlock_irq(&de->lock);
-       enable_irq(dev->irq);
+       enable_irq(irq);
 
        /* Update the error counts. */
        __de_get_stats(de);
 
-       synchronize_irq(dev->irq);
+       synchronize_irq(irq);
        de_clean_rings(de);
 
        de_init_rings(de);
@@ -2024,8 +2025,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
                goto err_out_res;
        }
 
-       dev->irq = pdev->irq;
-
        /* obtain and check validity of PCI I/O address */
        pciaddr = pci_resource_start(pdev, 1);
        if (!pciaddr) {
@@ -2050,7 +2049,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
                       pciaddr, pci_name(pdev));
                goto err_out_res;
        }
-       dev->base_addr = (unsigned long) regs;
        de->regs = regs;
 
        de_adapter_wake(de);
@@ -2078,11 +2076,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
                goto err_out_iomap;
 
        /* print info about board and interface just registered */
-       netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+       netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
                    de->de21040 ? "21040" : "21041",
-                   dev->base_addr,
-                   dev->dev_addr,
-                   dev->irq);
+                   regs, dev->dev_addr, pdev->irq);
 
        pci_set_drvdata(pdev, dev);
 
@@ -2130,9 +2126,11 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
 
        rtnl_lock();
        if (netif_running (dev)) {
+               const int irq = pdev->irq;
+
                del_timer_sync(&de->media_timer);
 
-               disable_irq(dev->irq);
+               disable_irq(irq);
                spin_lock_irq(&de->lock);
 
                de_stop_hw(de);
@@ -2141,12 +2139,12 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
                netif_carrier_off(dev);
 
                spin_unlock_irq(&de->lock);
-               enable_irq(dev->irq);
+               enable_irq(irq);
 
                /* Update the error counts. */
                __de_get_stats(de);
 
-               synchronize_irq(dev->irq);
+               synchronize_irq(irq);
                de_clean_rings(de);
 
                de_adapter_sleep(de);
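
The de2104x hunks stop caching the IRQ and register base in struct net_device (dev->irq, dev->base_addr) and instead read them from the pci_dev or the mapped __iomem pointer at each call site. Roughly, with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct foo_priv {
	struct pci_dev *pdev;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	/* A real handler would ack and process the device's events here. */
	return IRQ_HANDLED;
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);
	const int irq = fp->pdev->irq;	/* single source of truth for the IRQ */

	return request_irq(irq, foo_interrupt, IRQF_SHARED, dev->name, dev);
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	free_irq(fp->pdev->irq, dev);
	return 0;
}
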
index 1eccf494548575b6879d5aa78378bb220f271c0c..0ef5b68acd0509d05c6fb14077426b185baadc0c 100644 (file)
 #define DMFE_TX_TIMEOUT ((3*HZ)/2)     /* tx packet time-out time 1.5 s" */
 #define DMFE_TX_KICK   (HZ/2)  /* tx packet Kick-out time 0.5 s" */
 
+#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
+#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
+#define dr32(reg)      ioread32(ioaddr + (reg))
+#define dr16(reg)      ioread16(ioaddr + (reg))
+#define dr8(reg)       ioread8(ioaddr + (reg))
+
 #define DMFE_DBUG(dbug_now, msg, value)                        \
        do {                                            \
                if (dmfe_debug || (dbug_now))           \
 
 #define SROM_V41_CODE   0x14
 
-#define SROM_CLK_WRITE(data, ioaddr) \
-       outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
-       udelay(5); \
-       outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
-       udelay(5); \
-       outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
-       udelay(5);
-
 #define __CHK_IO_SIZE(pci_id, dev_rev) \
  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
        DM9102A_IO_SIZE: DM9102_IO_SIZE)
@@ -213,11 +211,11 @@ struct rx_desc {
 struct dmfe_board_info {
        u32 chip_id;                    /* Chip vendor/Device ID */
        u8 chip_revision;               /* Chip revision */
-       struct DEVICE *next_dev;        /* next device */
+       struct net_device *next_dev;    /* next device */
        struct pci_dev *pdev;           /* PCI device */
        spinlock_t lock;
 
-       long ioaddr;                    /* I/O base address */
+       void __iomem *ioaddr;           /* I/O base address */
        u32 cr0_data;
        u32 cr5_data;
        u32 cr6_data;
@@ -320,20 +318,20 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
 static int dmfe_stop(struct DEVICE *);
 static void dmfe_set_filter_mode(struct DEVICE *);
 static const struct ethtool_ops netdev_ethtool_ops;
-static u16 read_srom_word(long ,int);
+static u16 read_srom_word(void __iomem *, int);
 static irqreturn_t dmfe_interrupt(int , void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_dmfe (struct net_device *dev);
 #endif
-static void dmfe_descriptor_init(struct net_device *, unsigned long);
+static void dmfe_descriptor_init(struct net_device *);
 static void allocate_rx_buffer(struct net_device *);
-static void update_cr6(u32, unsigned long);
+static void update_cr6(u32, void __iomem *);
 static void send_filter_frame(struct DEVICE *);
 static void dm9132_id_table(struct DEVICE *);
-static u16 phy_read(unsigned long, u8, u8, u32);
-static void phy_write(unsigned long, u8, u8, u16, u32);
-static void phy_write_1bit(unsigned long, u32);
-static u16 phy_read_1bit(unsigned long);
+static u16 phy_read(void __iomem *, u8, u8, u32);
+static void phy_write(void __iomem *, u8, u8, u16, u32);
+static void phy_write_1bit(void __iomem *, u32);
+static u16 phy_read_1bit(void __iomem *);
 static u8 dmfe_sense_speed(struct dmfe_board_info *);
 static void dmfe_process_mode(struct dmfe_board_info *);
 static void dmfe_timer(unsigned long);
@@ -462,14 +460,16 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
        db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 
        db->chip_id = ent->driver_data;
-       db->ioaddr = pci_resource_start(pdev, 0);
+       /* IO type range. */
+       db->ioaddr = pci_iomap(pdev, 0, 0);
+       if (!db->ioaddr)
+               goto err_out_free_buf;
+
        db->chip_revision = pdev->revision;
        db->wol_mode = 0;
 
        db->pdev = pdev;
 
-       dev->base_addr = db->ioaddr;
-       dev->irq = pdev->irq;
        pci_set_drvdata(pdev, dev);
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
@@ -484,9 +484,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
                db->chip_type = 0;
 
        /* read 64 word srom data */
-       for (i = 0; i < 64; i++)
+       for (i = 0; i < 64; i++) {
                ((__le16 *) db->srom)[i] =
                        cpu_to_le16(read_srom_word(db->ioaddr, i));
+       }
 
        /* Set Node address */
        for (i = 0; i < 6; i++)
@@ -494,16 +495,18 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
 
        err = register_netdev (dev);
        if (err)
-               goto err_out_free_buf;
+               goto err_out_unmap;
 
        dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
                 ent->driver_data >> 16,
-                pci_name(pdev), dev->dev_addr, dev->irq);
+                pci_name(pdev), dev->dev_addr, pdev->irq);
 
        pci_set_master(pdev);
 
        return 0;
 
+err_out_unmap:
+       pci_iounmap(pdev, db->ioaddr);
 err_out_free_buf:
        pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                            db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -532,7 +535,7 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
        if (dev) {
 
                unregister_netdev(dev);
-
+               pci_iounmap(db->pdev, db->ioaddr);
                pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
                                        DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
                                        db->desc_pool_dma_ptr);
@@ -555,13 +558,13 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
 
 static int dmfe_open(struct DEVICE *dev)
 {
-       int ret;
        struct dmfe_board_info *db = netdev_priv(dev);
+       const int irq = db->pdev->irq;
+       int ret;
 
        DMFE_DBUG(0, "dmfe_open", 0);
 
-       ret = request_irq(dev->irq, dmfe_interrupt,
-                         IRQF_SHARED, dev->name, dev);
+       ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
        if (ret)
                return ret;
 
@@ -615,14 +618,14 @@ static int dmfe_open(struct DEVICE *dev)
 static void dmfe_init_dm910x(struct DEVICE *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
-       unsigned long ioaddr = db->ioaddr;
+       void __iomem *ioaddr = db->ioaddr;
 
        DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
 
        /* Reset DM910x MAC controller */
-       outl(DM910X_RESET, ioaddr + DCR0);      /* RESET MAC */
+       dw32(DCR0, DM910X_RESET);       /* RESET MAC */
        udelay(100);
-       outl(db->cr0_data, ioaddr + DCR0);
+       dw32(DCR0, db->cr0_data);
        udelay(5);
 
        /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
@@ -633,12 +636,12 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
        db->media_mode = dmfe_media_mode;
 
        /* RESET Phyxcer Chip by GPR port bit 7 */
-       outl(0x180, ioaddr + DCR12);            /* Let bit 7 output port */
+       dw32(DCR12, 0x180);             /* Let bit 7 output port */
        if (db->chip_id == PCI_DM9009_ID) {
-               outl(0x80, ioaddr + DCR12);     /* Issue RESET signal */
+               dw32(DCR12, 0x80);      /* Issue RESET signal */
                mdelay(300);                    /* Delay 300 ms */
        }
-       outl(0x0, ioaddr + DCR12);      /* Clear RESET signal */
+       dw32(DCR12, 0x0);       /* Clear RESET signal */
 
        /* Process Phyxcer Media Mode */
        if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
@@ -649,7 +652,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
                db->op_mode = db->media_mode;   /* Force Mode */
 
        /* Initialize Transmit/Receive decriptor and CR3/4 */
-       dmfe_descriptor_init(dev, ioaddr);
+       dmfe_descriptor_init(dev);
 
        /* Init CR6 to program DM910x operation */
        update_cr6(db->cr6_data, ioaddr);
@@ -662,10 +665,10 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
 
        /* Init CR7, interrupt active bit */
        db->cr7_data = CR7_DEFAULT;
-       outl(db->cr7_data, ioaddr + DCR7);
+       dw32(DCR7, db->cr7_data);
 
        /* Init CR15, Tx jabber and Rx watchdog timer */
-       outl(db->cr15_data, ioaddr + DCR15);
+       dw32(DCR15, db->cr15_data);
 
        /* Enable DM910X Tx/Rx function */
        db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
@@ -682,6 +685,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
                                         struct DEVICE *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
        struct tx_desc *txptr;
        unsigned long flags;
 
@@ -707,7 +711,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
        }
 
        /* Disable NIC interrupt */
-       outl(0, dev->base_addr + DCR7);
+       dw32(DCR7, 0);
 
        /* transmit this packet */
        txptr = db->tx_insert_ptr;
@@ -721,11 +725,11 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
        if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
-               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
+               dw32(DCR1, 0x1);                        /* Issue Tx polling */
                dev->trans_start = jiffies;             /* saved time stamp */
        } else {
                db->tx_queue_cnt++;                     /* queue TX packet */
-               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
+               dw32(DCR1, 0x1);                        /* Issue Tx polling */
        }
 
        /* Tx resource check */
@@ -734,7 +738,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 
        /* Restore CR7 to enable interrupt */
        spin_unlock_irqrestore(&db->lock, flags);
-       outl(db->cr7_data, dev->base_addr + DCR7);
+       dw32(DCR7, db->cr7_data);
 
        /* free this SKB */
        dev_kfree_skb(skb);
@@ -751,7 +755,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 static int dmfe_stop(struct DEVICE *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
-       unsigned long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = db->ioaddr;
 
        DMFE_DBUG(0, "dmfe_stop", 0);
 
@@ -762,12 +766,12 @@ static int dmfe_stop(struct DEVICE *dev)
        del_timer_sync(&db->timer);
 
        /* Reset & stop DM910X board */
-       outl(DM910X_RESET, ioaddr + DCR0);
+       dw32(DCR0, DM910X_RESET);
        udelay(5);
-       phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+       phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
 
        /* free interrupt */
-       free_irq(dev->irq, dev);
+       free_irq(db->pdev->irq, dev);
 
        /* free allocated rx buffer */
        dmfe_free_rxbuffer(db);
@@ -794,7 +798,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 {
        struct DEVICE *dev = dev_id;
        struct dmfe_board_info *db = netdev_priv(dev);
-       unsigned long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = db->ioaddr;
        unsigned long flags;
 
        DMFE_DBUG(0, "dmfe_interrupt()", 0);
@@ -802,15 +806,15 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
        spin_lock_irqsave(&db->lock, flags);
 
        /* Got DM910X status */
-       db->cr5_data = inl(ioaddr + DCR5);
-       outl(db->cr5_data, ioaddr + DCR5);
+       db->cr5_data = dr32(DCR5);
+       dw32(DCR5, db->cr5_data);
        if ( !(db->cr5_data & 0xc1) ) {
                spin_unlock_irqrestore(&db->lock, flags);
                return IRQ_HANDLED;
        }
 
        /* Disable all interrupt in CR7 to solve the interrupt edge problem */
-       outl(0, ioaddr + DCR7);
+       dw32(DCR7, 0);
 
        /* Check system status */
        if (db->cr5_data & 0x2000) {
@@ -838,11 +842,11 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
        if (db->dm910x_chk_mode & 0x2) {
                db->dm910x_chk_mode = 0x4;
                db->cr6_data |= 0x100;
-               update_cr6(db->cr6_data, db->ioaddr);
+               update_cr6(db->cr6_data, ioaddr);
        }
 
        /* Restore CR7 to enable interrupt mask */
-       outl(db->cr7_data, ioaddr + DCR7);
+       dw32(DCR7, db->cr7_data);
 
        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
@@ -858,11 +862,14 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 
 static void poll_dmfe (struct net_device *dev)
 {
+       struct dmfe_board_info *db = netdev_priv(dev);
+       const int irq = db->pdev->irq;
+
        /* disable_irq here is not very nice, but with the lockless
           interrupt handler we have no other choice. */
-       disable_irq(dev->irq);
-       dmfe_interrupt (dev->irq, dev);
-       enable_irq(dev->irq);
+       disable_irq(irq);
+       dmfe_interrupt (irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -873,7 +880,7 @@ static void poll_dmfe (struct net_device *dev)
 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 {
        struct tx_desc *txptr;
-       unsigned long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = db->ioaddr;
        u32 tdes0;
 
        txptr = db->tx_remove_ptr;
@@ -897,7 +904,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
                                        db->tx_fifo_underrun++;
                                        if ( !(db->cr6_data & CR6_SFT) ) {
                                                db->cr6_data = db->cr6_data | CR6_SFT;
-                                               update_cr6(db->cr6_data, db->ioaddr);
+                                               update_cr6(db->cr6_data, ioaddr);
                                        }
                                }
                                if (tdes0 & 0x0100)
@@ -924,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
                db->tx_queue_cnt--;
-               outl(0x1, ioaddr + DCR1);               /* Issue Tx polling */
+               dw32(DCR1, 0x1);                        /* Issue Tx polling */
                dev->trans_start = jiffies;             /* saved time stamp */
        }
 
@@ -1087,12 +1094,7 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
 
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-       if (np->pdev)
-               strlcpy(info->bus_info, pci_name(np->pdev),
-                       sizeof(info->bus_info));
-       else
-               sprintf(info->bus_info, "EISA 0x%lx %d",
-                       dev->base_addr, dev->irq);
+       strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1132,10 +1134,11 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 
 static void dmfe_timer(unsigned long data)
 {
+       struct net_device *dev = (struct net_device *)data;
+       struct dmfe_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
        u32 tmp_cr8;
        unsigned char tmp_cr12;
-       struct DEVICE *dev = (struct DEVICE *) data;
-       struct dmfe_board_info *db = netdev_priv(dev);
        unsigned long flags;
 
        int link_ok, link_ok_phy;
@@ -1148,11 +1151,10 @@ static void dmfe_timer(unsigned long data)
                db->first_in_callback = 1;
                if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
                        db->cr6_data &= ~0x40000;
-                       update_cr6(db->cr6_data, db->ioaddr);
-                       phy_write(db->ioaddr,
-                                 db->phy_addr, 0, 0x1000, db->chip_id);
+                       update_cr6(db->cr6_data, ioaddr);
+                       phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
                        db->cr6_data |= 0x40000;
-                       update_cr6(db->cr6_data, db->ioaddr);
+                       update_cr6(db->cr6_data, ioaddr);
                        db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
                        add_timer(&db->timer);
                        spin_unlock_irqrestore(&db->lock, flags);
@@ -1167,7 +1169,7 @@ static void dmfe_timer(unsigned long data)
                db->dm910x_chk_mode = 0x4;
 
        /* Dynamic reset DM910X : system error or transmit time-out */
-       tmp_cr8 = inl(db->ioaddr + DCR8);
+       tmp_cr8 = dr32(DCR8);
        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
                db->reset_cr8++;
                db->wait_reset = 1;
@@ -1177,7 +1179,7 @@ static void dmfe_timer(unsigned long data)
        /* TX polling kick monitor */
        if ( db->tx_packet_cnt &&
             time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
-               outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */
+               dw32(DCR1, 0x1);   /* Tx polling again */
 
                /* TX Timeout */
                if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
@@ -1200,9 +1202,9 @@ static void dmfe_timer(unsigned long data)
 
        /* Link status check, Dynamic media type change */
        if (db->chip_id == PCI_DM9132_ID)
-               tmp_cr12 = inb(db->ioaddr + DCR9 + 3);  /* DM9132 */
+               tmp_cr12 = dr8(DCR9 + 3);       /* DM9132 */
        else
-               tmp_cr12 = inb(db->ioaddr + DCR12);     /* DM9102/DM9102A */
+               tmp_cr12 = dr8(DCR12);          /* DM9102/DM9102A */
 
        if ( ((db->chip_id == PCI_DM9102_ID) &&
                (db->chip_revision == 0x30)) ||
@@ -1251,7 +1253,7 @@ static void dmfe_timer(unsigned long data)
                        /* 10/100M link failed, used 1M Home-Net */
                        db->cr6_data|=0x00040000;       /* bit18=1, MII */
                        db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
-                       update_cr6(db->cr6_data, db->ioaddr);
+                       update_cr6(db->cr6_data, ioaddr);
                }
        } else if (!netif_carrier_ok(dev)) {
 
@@ -1288,17 +1290,18 @@ static void dmfe_timer(unsigned long data)
  *     Re-initialize DM910X board
  */
 
-static void dmfe_dynamic_reset(struct DEVICE *dev)
+static void dmfe_dynamic_reset(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
 
        DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
 
        /* Sopt MAC controller */
        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
-       update_cr6(db->cr6_data, dev->base_addr);
-       outl(0, dev->base_addr + DCR7);         /* Disable Interrupt */
-       outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+       update_cr6(db->cr6_data, ioaddr);
+       dw32(DCR7, 0);                          /* Disable Interrupt */
+       dw32(DCR5, dr32(DCR5));
 
        /* Disable upper layer interface */
        netif_stop_queue(dev);
@@ -1364,9 +1367,10 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
  *     Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
+static void dmfe_descriptor_init(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
        struct tx_desc *tmp_tx;
        struct rx_desc *tmp_rx;
        unsigned char *tmp_buf;
@@ -1379,7 +1383,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
        /* tx descriptor start pointer */
        db->tx_insert_ptr = db->first_tx_desc;
        db->tx_remove_ptr = db->first_tx_desc;
-       outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+       dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
 
        /* rx descriptor start pointer */
        db->first_rx_desc = (void *)db->first_tx_desc +
@@ -1389,7 +1393,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
                        sizeof(struct tx_desc) * TX_DESC_CNT;
        db->rx_insert_ptr = db->first_rx_desc;
        db->rx_ready_ptr = db->first_rx_desc;
-       outl(db->first_rx_desc_dma, ioaddr + DCR3);     /* RX DESC address */
+       dw32(DCR3, db->first_rx_desc_dma);              /* RX DESC address */
 
        /* Init Transmit chain */
        tmp_buf = db->buf_pool_start;
@@ -1431,14 +1435,14 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
  *     Firstly stop DM910X , then written value and start
  */
 
-static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
 {
        u32 cr6_tmp;
 
        cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
-       outl(cr6_tmp, ioaddr + DCR6);
+       dw32(DCR6, cr6_tmp);
        udelay(5);
-       outl(cr6_data, ioaddr + DCR6);
+       dw32(DCR6, cr6_data);
        udelay(5);
 }
 
@@ -1448,24 +1452,19 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
  *     This setup frame initialize DM910X address filter mode
 */
 
-static void dm9132_id_table(struct DEVICE *dev)
+static void dm9132_id_table(struct net_device *dev)
 {
+       struct dmfe_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr + 0xc0;
+       u16 *addrptr = (u16 *)dev->dev_addr;
        struct netdev_hw_addr *ha;
-       u16 * addrptr;
-       unsigned long ioaddr = dev->base_addr+0xc0;             /* ID Table */
-       u32 hash_val;
        u16 i, hash_table[4];
 
-       DMFE_DBUG(0, "dm9132_id_table()", 0);
-
        /* Node address */
-       addrptr = (u16 *) dev->dev_addr;
-       outw(addrptr[0], ioaddr);
-       ioaddr += 4;
-       outw(addrptr[1], ioaddr);
-       ioaddr += 4;
-       outw(addrptr[2], ioaddr);
-       ioaddr += 4;
+       for (i = 0; i < 3; i++) {
+               dw16(0, addrptr[i]);
+               ioaddr += 4;
+       }
 
        /* Clear Hash Table */
        memset(hash_table, 0, sizeof(hash_table));
@@ -1475,13 +1474,14 @@ static void dm9132_id_table(struct DEVICE *dev)
 
        /* the multicast address in Hash Table : 64 bits */
        netdev_for_each_mc_addr(ha, dev) {
-               hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
+               u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
+
                hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
        }
 
        /* Write the hash table to MAC MD table */
        for (i = 0; i < 4; i++, ioaddr += 4)
-               outw(hash_table[i], ioaddr);
+               dw16(0, hash_table[i]);
 }
 
 
@@ -1490,7 +1490,7 @@ static void dm9132_id_table(struct DEVICE *dev)
  *     This setup frame initialize DM910X address filter mode
  */
 
-static void send_filter_frame(struct DEVICE *dev)
+static void send_filter_frame(struct net_device *dev)
 {
        struct dmfe_board_info *db = netdev_priv(dev);
        struct netdev_hw_addr *ha;
@@ -1535,12 +1535,14 @@ static void send_filter_frame(struct DEVICE *dev)
 
        /* Resource Check and Send the setup packet */
        if (!db->tx_packet_cnt) {
+               void __iomem *ioaddr = db->ioaddr;
+
                /* Resource Empty */
                db->tx_packet_cnt++;
                txptr->tdes0 = cpu_to_le32(0x80000000);
-               update_cr6(db->cr6_data | 0x2000, dev->base_addr);
-               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
-               update_cr6(db->cr6_data, dev->base_addr);
+               update_cr6(db->cr6_data | 0x2000, ioaddr);
+               dw32(DCR1, 0x1);        /* Issue Tx polling */
+               update_cr6(db->cr6_data, ioaddr);
                dev->trans_start = jiffies;
        } else
                db->tx_queue_cnt++;     /* Put in TX queue */
@@ -1575,43 +1577,55 @@ static void allocate_rx_buffer(struct net_device *dev)
        db->rx_insert_ptr = rxptr;
 }
 
+static void srom_clk_write(void __iomem *ioaddr, u32 data)
+{
+       static const u32 cmd[] = {
+               CR9_SROM_READ | CR9_SRCS,
+               CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
+               CR9_SROM_READ | CR9_SRCS
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cmd); i++) {
+               dw32(DCR9, data | cmd[i]);
+               udelay(5);
+       }
+}
 
 /*
  *     Read one word data from the serial ROM
  */
-
-static u16 read_srom_word(long ioaddr, int offset)
+static u16 read_srom_word(void __iomem *ioaddr, int offset)
 {
+       u16 srom_data;
        int i;
-       u16 srom_data = 0;
-       long cr9_ioaddr = ioaddr + DCR9;
 
-       outl(CR9_SROM_READ, cr9_ioaddr);
-       outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+       dw32(DCR9, CR9_SROM_READ);
+       dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
        /* Send the Read Command 110b */
-       SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-       SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-       SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+       srom_clk_write(ioaddr, SROM_DATA_1);
+       srom_clk_write(ioaddr, SROM_DATA_1);
+       srom_clk_write(ioaddr, SROM_DATA_0);
 
        /* Send the offset */
        for (i = 5; i >= 0; i--) {
                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
-               SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+               srom_clk_write(ioaddr, srom_data);
        }
 
-       outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+       dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
        for (i = 16; i > 0; i--) {
-               outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+               dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
                udelay(5);
                srom_data = (srom_data << 1) |
-                               ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
-               outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+                               ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
+               dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
                udelay(5);
        }
 
-       outl(CR9_SROM_READ, cr9_ioaddr);
+       dw32(DCR9, CR9_SROM_READ);
        return srom_data;
 }
 
@@ -1620,13 +1634,14 @@ static u16 read_srom_word(long ioaddr, int offset)
  *     Auto sense the media mode
  */
 
-static u8 dmfe_sense_speed(struct dmfe_board_info * db)
+static u8 dmfe_sense_speed(struct dmfe_board_info *db)
 {
+       void __iomem *ioaddr = db->ioaddr;
        u8 ErrFlag = 0;
        u16 phy_mode;
 
        /* CR6 bit18=0, select 10/100M */
-       update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
+       update_cr6(db->cr6_data & ~0x40000, ioaddr);
 
        phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
        phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
@@ -1665,11 +1680,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
 
 static void dmfe_set_phyxcer(struct dmfe_board_info *db)
 {
+       void __iomem *ioaddr = db->ioaddr;
        u16 phy_reg;
 
        /* Select 10/100M phyxcer */
        db->cr6_data &= ~0x40000;
-       update_cr6(db->cr6_data, db->ioaddr);
+       update_cr6(db->cr6_data, ioaddr);
 
        /* DM9009 Chip: Phyxcer reg18 bit12=0 */
        if (db->chip_id == PCI_DM9009_ID) {
@@ -1765,18 +1781,15 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
  *     Write a word to Phy register
  */
 
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
                      u16 phy_data, u32 chip_id)
 {
        u16 i;
-       unsigned long ioaddr;
 
        if (chip_id == PCI_DM9132_ID) {
-               ioaddr = iobase + 0x80 + offset * 4;
-               outw(phy_data, ioaddr);
+               dw16(0x80 + offset * 4, phy_data);
        } else {
                /* DM9102/DM9102A Chip */
-               ioaddr = iobase + DCR9;
 
                /* Send 33 synchronization clock to Phy controller */
                for (i = 0; i < 35; i++)
@@ -1816,19 +1829,16 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
  *     Read a word data from phy register
  */
 
-static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
 {
        int i;
        u16 phy_data;
-       unsigned long ioaddr;
 
        if (chip_id == PCI_DM9132_ID) {
                /* DM9132 Chip */
-               ioaddr = iobase + 0x80 + offset * 4;
-               phy_data = inw(ioaddr);
+               phy_data = dr16(0x80 + offset * 4);
        } else {
                /* DM9102/DM9102A Chip */
-               ioaddr = iobase + DCR9;
 
                /* Send 33 synchronization clock to Phy controller */
                for (i = 0; i < 35; i++)
@@ -1870,13 +1880,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
  *     Write one bit data to Phy Controller
  */
 
-static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
+static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
 {
-       outl(phy_data, ioaddr);                 /* MII Clock Low */
+       dw32(DCR9, phy_data);           /* MII Clock Low */
        udelay(1);
-       outl(phy_data | MDCLKH, ioaddr);        /* MII Clock High */
+       dw32(DCR9, phy_data | MDCLKH);  /* MII Clock High */
        udelay(1);
-       outl(phy_data, ioaddr);                 /* MII Clock Low */
+       dw32(DCR9, phy_data);           /* MII Clock Low */
        udelay(1);
 }
 
@@ -1885,14 +1895,14 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
  *     Read one bit phy data from PHY controller
  */
 
-static u16 phy_read_1bit(unsigned long ioaddr)
+static u16 phy_read_1bit(void __iomem *ioaddr)
 {
        u16 phy_data;
 
-       outl(0x50000, ioaddr);
+       dw32(DCR9, 0x50000);
        udelay(1);
-       phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
-       outl(0x40000, ioaddr);
+       phy_data = (dr32(DCR9) >> 19) & 0x1;
+       dw32(DCR9, 0x40000);
        udelay(1);
 
        return phy_data;
@@ -1978,7 +1988,7 @@ static void dmfe_parse_srom(struct dmfe_board_info * db)
 
        /* Check DM9801 or DM9802 present or not */
        db->HPNA_present = 0;
-       update_cr6(db->cr6_data|0x40000, db->ioaddr);
+       update_cr6(db->cr6_data | 0x40000, db->ioaddr);
        tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
        if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
                /* DM9801 or DM9802 present */
@@ -2095,6 +2105,7 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
 {
        struct net_device *dev = pci_get_drvdata(pci_dev);
        struct dmfe_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
        u32 tmp;
 
        /* Disable upper layer interface */
@@ -2102,11 +2113,11 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
 
        /* Disable Tx/Rx */
        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
-       update_cr6(db->cr6_data, dev->base_addr);
+       update_cr6(db->cr6_data, ioaddr);
 
        /* Disable Interrupt */
-       outl(0, dev->base_addr + DCR7);
-       outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
+       dw32(DCR7, 0);
+       dw32(DCR5, dr32(DCR5));
 
        /* Fre RX buffers */
        dmfe_free_rxbuffer(db);
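
The dmfe conversion swaps raw port I/O (outl()/inl() on the value of pci_resource_start()) for pci_iomap() plus ioread32()/iowrite32(), hidden behind short dw32()/dr32()-style macros that expect a local 'ioaddr'. A compact sketch of the same idiom, with bar_* as placeholder names and a made-up register offset:

#include <linux/io.h>
#include <linux/pci.h>

#define bar_w32(reg, val)	iowrite32(val, ioaddr + (reg))
#define bar_r32(reg)		ioread32(ioaddr + (reg))

static void __iomem *bar_map(struct pci_dev *pdev)
{
	return pci_iomap(pdev, 0, 0);	/* map all of BAR 0, I/O port or MMIO */
}

static void bar_reset(void __iomem *ioaddr)
{
	bar_w32(0x00, 0x1);	/* hypothetical reset register and bit */
	(void)bar_r32(0x00);	/* read back to post the write */
}

static void bar_unmap(struct pci_dev *pdev, void __iomem *ioaddr)
{
	pci_iounmap(pdev, ioaddr);
}
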
index fea3641d939871cf9dff0c1d500b8f4e59528b09..c4f37aca22699a5700b561bf72253c428a35247e 100644 (file)
@@ -328,7 +328,7 @@ static void tulip_up(struct net_device *dev)
        udelay(100);
 
        if (tulip_debug > 1)
-               netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
+               netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
 
        iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
        iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -515,11 +515,13 @@ media_picked:
 static int
 tulip_open(struct net_device *dev)
 {
+       struct tulip_private *tp = netdev_priv(dev);
        int retval;
 
        tulip_init_ring (dev);
 
-       retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
+       retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
+                            dev->name, dev);
        if (retval)
                goto free_ring;
 
@@ -841,7 +843,7 @@ static int tulip_close (struct net_device *dev)
                netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
                           ioread32 (ioaddr + CSR5));
 
-       free_irq (dev->irq, dev);
+       free_irq (tp->pdev->irq, dev);
 
        tulip_free_ring (dev);
 
@@ -1489,8 +1491,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
 
        INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
-       dev->base_addr = (unsigned long)ioaddr;
-
 #ifdef CONFIG_TULIP_MWI
        if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
                tulip_mwi_config (pdev, dev);
@@ -1650,7 +1650,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        for (i = 0; i < 6; i++)
                last_phys_addr[i] = dev->dev_addr[i];
        last_irq = irq;
-       dev->irq = irq;
 
        /* The lower four bits are the media type. */
        if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
@@ -1858,7 +1857,8 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
        tulip_down(dev);
 
        netif_device_detach(dev);
-       free_irq(dev->irq, dev);
+       /* FIXME: it needlessly adds an error path. */
+       free_irq(tp->pdev->irq, dev);
 
 save_state:
        pci_save_state(pdev);
@@ -1900,7 +1900,9 @@ static int tulip_resume(struct pci_dev *pdev)
                return retval;
        }
 
-       if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
+       retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
+                            dev->name, dev);
+       if (retval) {
                pr_err("request_irq failed in resume\n");
                return retval;
        }
@@ -1960,11 +1962,14 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
 
 static void poll_tulip (struct net_device *dev)
 {
+       struct tulip_private *tp = netdev_priv(dev);
+       const int irq = tp->pdev->irq;
+
        /* disable_irq here is not very nice, but with the lockless
           interrupt handler we have no other choice. */
-       disable_irq(dev->irq);
-       tulip_interrupt (dev->irq, dev);
-       enable_irq(dev->irq);
+       disable_irq(irq);
+       tulip_interrupt (irq, dev);
+       enable_irq(irq);
 }
 #endif
 
index fc4001f6a5e43e56ebf4cb4eca556f4432272574..75d45f8a37dc170db51678919acbc27cc9cd2ac5 100644 (file)
@@ -42,6 +42,8 @@
 #include <asm/dma.h>
 #include <asm/uaccess.h>
 
+#define uw32(reg, val) iowrite32(val, ioaddr + (reg))
+#define ur32(reg)      ioread32(ioaddr + (reg))
 
 /* Board/System/Debug information/definition ---------------- */
 #define PCI_ULI5261_ID  0x526110B9     /* ULi M5261 ID*/
@@ -110,14 +112,6 @@ do {                                                               \
 
 #define SROM_V41_CODE   0x14
 
-#define SROM_CLK_WRITE(data, ioaddr)                                   \
-               outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);               \
-               udelay(5);                                              \
-               outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);     \
-               udelay(5);                                              \
-               outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);               \
-               udelay(5);
-
 /* Structure/enum declaration ------------------------------- */
 struct tx_desc {
         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -132,12 +126,15 @@ struct rx_desc {
 } __attribute__(( aligned(32) ));
 
 struct uli526x_board_info {
-       u32 chip_id;                    /* Chip vendor/Device ID */
+       struct uli_phy_ops {
+               void (*write)(struct uli526x_board_info *, u8, u8, u16);
+               u16 (*read)(struct uli526x_board_info *, u8, u8);
+       } phy;
        struct net_device *next_dev;    /* next device */
        struct pci_dev *pdev;           /* PCI device */
        spinlock_t lock;
 
-       long ioaddr;                    /* I/O base address */
+       void __iomem *ioaddr;           /* I/O base address */
        u32 cr0_data;
        u32 cr5_data;
        u32 cr6_data;
@@ -227,21 +224,21 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
 static int uli526x_stop(struct net_device *);
 static void uli526x_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
-static u16 read_srom_word(long, int);
+static u16 read_srom_word(struct uli526x_board_info *, int);
 static irqreturn_t uli526x_interrupt(int, void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev);
 #endif
-static void uli526x_descriptor_init(struct net_device *, unsigned long);
+static void uli526x_descriptor_init(struct net_device *, void __iomem *);
 static void allocate_rx_buffer(struct net_device *);
-static void update_cr6(u32, unsigned long);
+static void update_cr6(u32, void __iomem *);
 static void send_filter_frame(struct net_device *, int);
-static u16 phy_read(unsigned long, u8, u8, u32);
-static u16 phy_readby_cr10(unsigned long, u8, u8);
-static void phy_write(unsigned long, u8, u8, u16, u32);
-static void phy_writeby_cr10(unsigned long, u8, u8, u16);
-static void phy_write_1bit(unsigned long, u32, u32);
-static u16 phy_read_1bit(unsigned long, u32);
+static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
+static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
+static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
+static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
+static void phy_write_1bit(struct uli526x_board_info *db, u32);
+static u16 phy_read_1bit(struct uli526x_board_info *db);
 static u8 uli526x_sense_speed(struct uli526x_board_info *);
 static void uli526x_process_mode(struct uli526x_board_info *);
 static void uli526x_timer(unsigned long);
@@ -253,6 +250,18 @@ static void uli526x_free_rxbuffer(struct uli526x_board_info *);
 static void uli526x_init(struct net_device *);
 static void uli526x_set_phyxcer(struct uli526x_board_info *);
 
+static void srom_clk_write(struct uli526x_board_info *db, u32 data)
+{
+       void __iomem *ioaddr = db->ioaddr;
+
+       uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
+       udelay(5);
+       uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
+       udelay(5);
+       uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
+       udelay(5);
+}
+
 /* ULI526X network board routine ---------------------------- */
 
 static const struct net_device_ops netdev_ops = {
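
Turning SROM_CLK_WRITE() into the srom_clk_write() function above does more than tidy the code: the old macro body was three statements with no do { } while (0) wrapper, so using it as the body of an if would guard only the first outl(). A small user-space illustration of that hazard; CLK_MACRO(), clk_func() and step() are invented for the demo:

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

/* Unsafe: three statements, no do { } while (0) around them. */
#define CLK_MACRO()     step("raise"); step("lower"); step("settle");

static void clk_func(void)
{
        step("raise");
        step("lower");
        step("settle");
}

int main(void)
{
        int enabled = 0;

        if (enabled)
                CLK_MACRO();    /* only "raise" is guarded; the rest always runs */

        if (enabled)
                clk_func();     /* nothing runs, as intended */

        return 0;
}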
@@ -277,6 +286,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
 {
        struct uli526x_board_info *db;  /* board information structure */
        struct net_device *dev;
+       void __iomem *ioaddr;
        int i, err;
 
        ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -313,9 +323,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
                goto err_out_disable;
        }
 
-       if (pci_request_regions(pdev, DRV_NAME)) {
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err < 0) {
                pr_err("Failed to request PCI regions\n");
-               err = -ENODEV;
                goto err_out_disable;
        }
 
@@ -323,32 +333,41 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
        db = netdev_priv(dev);
 
        /* Allocate Tx/Rx descriptor memory */
+       err = -ENOMEM;
+
        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
-       if(db->desc_pool_ptr == NULL)
-       {
-               err = -ENOMEM;
-               goto err_out_nomem;
-       }
+       if (!db->desc_pool_ptr)
+               goto err_out_release;
+
        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
-       if(db->buf_pool_ptr == NULL)
-       {
-               err = -ENOMEM;
-               goto err_out_nomem;
-       }
+       if (!db->buf_pool_ptr)
+               goto err_out_free_tx_desc;
 
        db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
        db->first_tx_desc_dma = db->desc_pool_dma_ptr;
        db->buf_pool_start = db->buf_pool_ptr;
        db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 
-       db->chip_id = ent->driver_data;
-       db->ioaddr = pci_resource_start(pdev, 0);
+       switch (ent->driver_data) {
+       case PCI_ULI5263_ID:
+               db->phy.write   = phy_writeby_cr10;
+               db->phy.read    = phy_readby_cr10;
+               break;
+       default:
+               db->phy.write   = phy_writeby_cr9;
+               db->phy.read    = phy_readby_cr9;
+               break;
+       }
+
+       /* IO region. */
+       ioaddr = pci_iomap(pdev, 0, 0);
+       if (!ioaddr)
+               goto err_out_free_tx_buf;
 
+       db->ioaddr = ioaddr;
        db->pdev = pdev;
        db->init = 1;
 
-       dev->base_addr = db->ioaddr;
-       dev->irq = pdev->irq;
        pci_set_drvdata(pdev, dev);
 
        /* Register some necessary functions */
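
Rather than passing chip_id into every PHY helper and branching inside each one, probe now selects the accessor pair once, based on ent->driver_data, and stores it in db->phy; the hot paths simply call phy->read()/phy->write(). A compilable user-space sketch of that dispatch; the chip ID value, the return values and the function names are fabricated:

#include <stdint.h>
#include <stdio.h>

struct board;

struct phy_ops {
        uint16_t (*read)(struct board *, uint8_t phy, uint8_t reg);
        void (*write)(struct board *, uint8_t phy, uint8_t reg, uint16_t val);
};

struct board {
        struct phy_ops phy;
        uint32_t chip_id;
};

/* Two access flavours, standing in for the CR9 bit-bang path and the
 * CR10 "hardware does the framing" path of the real driver. */
static uint16_t read_cr9(struct board *b, uint8_t phy, uint8_t reg)
{ (void)b; return (uint16_t)(0x0900 | (phy << 4) | reg); }
static void write_cr9(struct board *b, uint8_t phy, uint8_t reg, uint16_t v)
{ (void)b; printf("cr9  write phy%d reg%d = %#x\n", phy, reg, (unsigned)v); }

static uint16_t read_cr10(struct board *b, uint8_t phy, uint8_t reg)
{ (void)b; return (uint16_t)(0x1000 | (phy << 4) | reg); }
static void write_cr10(struct board *b, uint8_t phy, uint8_t reg, uint16_t v)
{ (void)b; printf("cr10 write phy%d reg%d = %#x\n", phy, reg, (unsigned)v); }

#define CHIP_5263       0x5263          /* invented ID, not the real PCI ID */

static void probe(struct board *b, uint32_t chip_id)
{
        b->chip_id = chip_id;
        if (chip_id == CHIP_5263) {
                b->phy.read  = read_cr10;
                b->phy.write = write_cr10;
        } else {
                b->phy.read  = read_cr9;
                b->phy.write = write_cr9;
        }
}

int main(void)
{
        struct board b;

        probe(&b, CHIP_5263);
        printf("status = %#x\n", (unsigned)b.phy.read(&b, 1, 1));
        b.phy.write(&b, 1, 0, 0x1200);  /* e.g. restart autonegotiation */
        return 0;
}

The indirection also lets the read/write prototypes drop their chip_id and iobase parameters, which is what shrinks the forward declarations in the earlier hunk.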
@@ -360,24 +379,24 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
 
        /* read 64 word srom data */
        for (i = 0; i < 64; i++)
-               ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+               ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));
 
        /* Set Node address */
        if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)               /* SROM absent, so read MAC address from ID Table */
        {
-               outl(0x10000, db->ioaddr + DCR0);       //Diagnosis mode
-               outl(0x1c0, db->ioaddr + DCR13);        //Reset dianostic pointer port
-               outl(0, db->ioaddr + DCR14);            //Clear reset port
-               outl(0x10, db->ioaddr + DCR14);         //Reset ID Table pointer
-               outl(0, db->ioaddr + DCR14);            //Clear reset port
-               outl(0, db->ioaddr + DCR13);            //Clear CR13
-               outl(0x1b0, db->ioaddr + DCR13);        //Select ID Table access port
+               uw32(DCR0, 0x10000);    //Diagnosis mode
+               uw32(DCR13, 0x1c0);     //Reset dianostic pointer port
+               uw32(DCR14, 0);         //Clear reset port
+               uw32(DCR14, 0x10);      //Reset ID Table pointer
+               uw32(DCR14, 0);         //Clear reset port
+               uw32(DCR13, 0);         //Clear CR13
+               uw32(DCR13, 0x1b0);     //Select ID Table access port
                //Read MAC address from CR14
                for (i = 0; i < 6; i++)
-                       dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+                       dev->dev_addr[i] = ur32(DCR14);
                //Read end
-               outl(0, db->ioaddr + DCR13);    //Clear CR13
-               outl(0, db->ioaddr + DCR0);             //Clear CR0
+               uw32(DCR13, 0);         //Clear CR13
+               uw32(DCR0, 0);          //Clear CR0
                udelay(10);
        }
        else            /*Exist SROM*/
@@ -387,26 +406,26 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
        }
        err = register_netdev (dev);
        if (err)
-               goto err_out_res;
+               goto err_out_unmap;
 
        netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
                    ent->driver_data >> 16, pci_name(pdev),
-                   dev->dev_addr, dev->irq);
+                   dev->dev_addr, pdev->irq);
 
        pci_set_master(pdev);
 
        return 0;
 
-err_out_res:
+err_out_unmap:
+       pci_iounmap(pdev, db->ioaddr);
+err_out_free_tx_buf:
+       pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+                           db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_free_tx_desc:
+       pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+                           db->desc_pool_ptr, db->desc_pool_dma_ptr);
+err_out_release:
        pci_release_regions(pdev);
-err_out_nomem:
-       if(db->desc_pool_ptr)
-               pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
-                       db->desc_pool_ptr, db->desc_pool_dma_ptr);
-
-       if(db->buf_pool_ptr != NULL)
-               pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
-                       db->buf_pool_ptr, db->buf_pool_dma_ptr);
 err_out_disable:
        pci_disable_device(pdev);
 err_out_free:
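
The reworked error path follows the usual kernel ladder: every acquisition gets a label, a failure jumps to the label that undoes everything obtained so far, and the labels run in reverse order of acquisition, so nothing is freed twice and nothing leaks. A self-contained sketch with malloc() standing in for the DMA pools and the iomap (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
        void *desc_pool;
        void *buf_pool;
        void *ioaddr;
};

static int setup(struct ctx *c)
{
        int err = -12;                          /* -ENOMEM */

        c->desc_pool = malloc(4096);
        if (!c->desc_pool)
                goto err_out;

        c->buf_pool = malloc(4096);
        if (!c->buf_pool)
                goto err_free_desc;

        c->ioaddr = malloc(128);                /* stands in for pci_iomap() */
        if (!c->ioaddr)
                goto err_free_buf;

        return 0;

        /* Unwind strictly in reverse order of acquisition. */
err_free_buf:
        free(c->buf_pool);
err_free_desc:
        free(c->desc_pool);
err_out:
        return err;
}

int main(void)
{
        struct ctx c = { 0 };
        int err = setup(&c);

        printf("setup() = %d\n", err);
        if (!err) {                             /* teardown mirrors the unwind */
                free(c.ioaddr);
                free(c.buf_pool);
                free(c.desc_pool);
        }
        return 0;
}

The uli526x_remove_one() hunk just below applies the same rule to normal teardown: unregister the netdev first, then unmap, free the pools, release the regions and disable the device.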
@@ -422,19 +441,17 @@ static void __devexit uli526x_remove_one (struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct uli526x_board_info *db = netdev_priv(dev);
 
-       ULI526X_DBUG(0, "uli526x_remove_one()", 0);
-
+       unregister_netdev(dev);
+       pci_iounmap(pdev, db->ioaddr);
        pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
                                DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
                                db->desc_pool_dma_ptr);
        pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                                db->buf_pool_ptr, db->buf_pool_dma_ptr);
-       unregister_netdev(dev);
        pci_release_regions(pdev);
-       free_netdev(dev);       /* free board information */
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
-       ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+       pci_set_drvdata(pdev, NULL);
+       free_netdev(dev);
 }
 
 
@@ -468,7 +485,8 @@ static int uli526x_open(struct net_device *dev)
        /* Initialize ULI526X board */
        uli526x_init(dev);
 
-       ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+       ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
+                         dev->name, dev);
        if (ret)
                return ret;
 
@@ -496,57 +514,57 @@ static int uli526x_open(struct net_device *dev)
 static void uli526x_init(struct net_device *dev)
 {
        struct uli526x_board_info *db = netdev_priv(dev);
-       unsigned long ioaddr = db->ioaddr;
+       struct uli_phy_ops *phy = &db->phy;
+       void __iomem *ioaddr = db->ioaddr;
        u8      phy_tmp;
        u8      timeout;
-       u16     phy_value;
        u16 phy_reg_reset;
 
 
        ULI526X_DBUG(0, "uli526x_init()", 0);
 
        /* Reset M526x MAC controller */
-       outl(ULI526X_RESET, ioaddr + DCR0);     /* RESET MAC */
+       uw32(DCR0, ULI526X_RESET);      /* RESET MAC */
        udelay(100);
-       outl(db->cr0_data, ioaddr + DCR0);
+       uw32(DCR0, db->cr0_data);
        udelay(5);
 
        /* Phy addr : In some boards,M5261/M5263 phy address != 1 */
        db->phy_addr = 1;
-       for(phy_tmp=0;phy_tmp<32;phy_tmp++)
-       {
-               phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
-               if(phy_value != 0xffff&&phy_value!=0)
-               {
+       for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
+               u16 phy_value;
+
+               phy_value = phy->read(db, phy_tmp, 3);  //peer add
+               if (phy_value != 0xffff && phy_value != 0) {
                        db->phy_addr = phy_tmp;
                        break;
                }
        }
-       if(phy_tmp == 32)
+
+       if (phy_tmp == 32)
                pr_warn("Can not find the phy address!!!\n");
        /* Parser SROM and media mode */
        db->media_mode = uli526x_media_mode;
 
        /* phyxcer capability setting */
-       phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+       phy_reg_reset = phy->read(db, db->phy_addr, 0);
        phy_reg_reset = (phy_reg_reset | 0x8000);
-       phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+       phy->write(db, db->phy_addr, 0, phy_reg_reset);
 
        /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
         * functions") or phy data sheet for details on phy reset
         */
        udelay(500);
        timeout = 10;
-       while (timeout-- &&
-               phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
-                       udelay(100);
+       while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
+               udelay(100);
 
        /* Process Phyxcer Media Mode */
        uli526x_set_phyxcer(db);
 
        /* Media Mode Process */
        if ( !(db->media_mode & ULI526X_AUTO) )
-               db->op_mode = db->media_mode;   /* Force Mode */
+               db->op_mode = db->media_mode;           /* Force Mode */
 
        /* Initialize Transmit/Receive decriptor and CR3/4 */
        uli526x_descriptor_init(dev, ioaddr);
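
uli526x_init() now finds the PHY by reading MII register 3 (the second PHY identifier word) at each of the 32 possible MDIO addresses through the new db->phy.read() hook, keeping the first address that answers with something other than 0 or 0xffff, then sets the BMCR reset bit and polls it with a bounded retry count. A compilable sketch of both loops, with the MDIO read faked so that only address 17 answers (all values invented):

#include <stdint.h>
#include <stdio.h>

/* Fake MDIO read: only PHY address 17 is populated. */
static uint16_t mdio_read(uint8_t addr, uint8_t reg)
{
        if (addr != 17)
                return 0xffff;                  /* empty bus reads all-ones */
        if (reg == 0)
                return 0x0000;                  /* BMCR: reset bit clear */
        return 0x0141;                          /* some non-zero ID word */
}

int main(void)
{
        uint8_t addr, found = 1;                /* default address, as in the driver */
        int timeout = 10;

        for (addr = 0; addr < 32; addr++) {
                uint16_t id = mdio_read(addr, 3);

                if (id != 0 && id != 0xffff) {
                        found = addr;
                        break;
                }
        }
        printf("PHY at address %d\n", found);

        /* Reset-and-poll: BMCR bit 15 self-clears when the reset completes. */
        while (timeout-- && (mdio_read(found, 0) & 0x8000))
                ;                               /* the driver udelay()s here */
        printf("reset %s\n", timeout >= 0 ? "complete" : "timed out");
        return 0;
}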
@@ -559,10 +577,10 @@ static void uli526x_init(struct net_device *dev)
 
        /* Init CR7, interrupt active bit */
        db->cr7_data = CR7_DEFAULT;
-       outl(db->cr7_data, ioaddr + DCR7);
+       uw32(DCR7, db->cr7_data);
 
        /* Init CR15, Tx jabber and Rx watchdog timer */
-       outl(db->cr15_data, ioaddr + DCR15);
+       uw32(DCR15, db->cr15_data);
 
        /* Enable ULI526X Tx/Rx function */
        db->cr6_data |= CR6_RXSC | CR6_TXSC;
@@ -579,6 +597,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
 {
        struct uli526x_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
        struct tx_desc *txptr;
        unsigned long flags;
 
@@ -604,7 +623,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
        }
 
        /* Disable NIC interrupt */
-       outl(0, dev->base_addr + DCR7);
+       uw32(DCR7, 0);
 
        /* transmit this packet */
        txptr = db->tx_insert_ptr;
@@ -615,10 +634,10 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
        db->tx_insert_ptr = txptr->next_tx_desc;
 
        /* Transmit Packet Process */
-       if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+       if (db->tx_packet_cnt < TX_DESC_CNT) {
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
-               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
+               uw32(DCR1, 0x1);                        /* Issue Tx polling */
                dev->trans_start = jiffies;             /* saved time stamp */
        }
 
@@ -628,7 +647,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
 
        /* Restore CR7 to enable interrupt */
        spin_unlock_irqrestore(&db->lock, flags);
-       outl(db->cr7_data, dev->base_addr + DCR7);
+       uw32(DCR7, db->cr7_data);
 
        /* free this SKB */
        dev_kfree_skb(skb);
@@ -645,9 +664,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
 static int uli526x_stop(struct net_device *dev)
 {
        struct uli526x_board_info *db = netdev_priv(dev);
-       unsigned long ioaddr = dev->base_addr;
-
-       ULI526X_DBUG(0, "uli526x_stop", 0);
+       void __iomem *ioaddr = db->ioaddr;
 
        /* disable system */
        netif_stop_queue(dev);
@@ -656,12 +673,12 @@ static int uli526x_stop(struct net_device *dev)
        del_timer_sync(&db->timer);
 
        /* Reset & stop ULI526X board */
-       outl(ULI526X_RESET, ioaddr + DCR0);
+       uw32(DCR0, ULI526X_RESET);
        udelay(5);
-       phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+       db->phy.write(db, db->phy_addr, 0, 0x8000);
 
        /* free interrupt */
-       free_irq(dev->irq, dev);
+       free_irq(db->pdev->irq, dev);
 
        /* free allocated rx buffer */
        uli526x_free_rxbuffer(db);
@@ -679,18 +696,18 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct uli526x_board_info *db = netdev_priv(dev);
-       unsigned long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = db->ioaddr;
        unsigned long flags;
 
        spin_lock_irqsave(&db->lock, flags);
-       outl(0, ioaddr + DCR7);
+       uw32(DCR7, 0);
 
        /* Got ULI526X status */
-       db->cr5_data = inl(ioaddr + DCR5);
-       outl(db->cr5_data, ioaddr + DCR5);
+       db->cr5_data = ur32(DCR5);
+       uw32(DCR5, db->cr5_data);
        if ( !(db->cr5_data & 0x180c1) ) {
                /* Restore CR7 to enable interrupt mask */
-               outl(db->cr7_data, ioaddr + DCR7);
+               uw32(DCR7, db->cr7_data);
                spin_unlock_irqrestore(&db->lock, flags);
                return IRQ_HANDLED;
        }
@@ -718,7 +735,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
                uli526x_free_tx_pkt(dev, db);
 
        /* Restore CR7 to enable interrupt mask */
-       outl(db->cr7_data, ioaddr + DCR7);
+       uw32(DCR7, db->cr7_data);
 
        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
@@ -727,8 +744,10 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev)
 {
+       struct uli526x_board_info *db = netdev_priv(dev);
+
        /* ISR grabs the irqsave lock, so this should be safe */
-       uli526x_interrupt(dev->irq, dev);
+       uli526x_interrupt(db->pdev->irq, dev);
 }
 #endif
 
@@ -962,12 +981,7 @@ static void netdev_get_drvinfo(struct net_device *dev,
 
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-       if (np->pdev)
-               strlcpy(info->bus_info, pci_name(np->pdev),
-                       sizeof(info->bus_info));
-       else
-               sprintf(info->bus_info, "EISA 0x%lx %d",
-                       dev->base_addr, dev->irq);
+       strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
@@ -1007,18 +1021,20 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 
 static void uli526x_timer(unsigned long data)
 {
-       u32 tmp_cr8;
-       unsigned char tmp_cr12=0;
        struct net_device *dev = (struct net_device *) data;
        struct uli526x_board_info *db = netdev_priv(dev);
+       struct uli_phy_ops *phy = &db->phy;
+       void __iomem *ioaddr = db->ioaddr;
        unsigned long flags;
+       u8 tmp_cr12 = 0;
+       u32 tmp_cr8;
 
        //ULI526X_DBUG(0, "uli526x_timer()", 0);
        spin_lock_irqsave(&db->lock, flags);
 
 
        /* Dynamic reset ULI526X : system error or transmit time-out */
-       tmp_cr8 = inl(db->ioaddr + DCR8);
+       tmp_cr8 = ur32(DCR8);
        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
                db->reset_cr8++;
                db->wait_reset = 1;
@@ -1028,7 +1044,7 @@ static void uli526x_timer(unsigned long data)
        /* TX polling kick monitor */
        if ( db->tx_packet_cnt &&
             time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
-               outl(0x1, dev->base_addr + DCR1);   // Tx polling again
+               uw32(DCR1, 0x1);   // Tx polling again
 
                // TX Timeout
                if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
@@ -1049,7 +1065,7 @@ static void uli526x_timer(unsigned long data)
        }
 
        /* Link status check, Dynamic media type change */
-       if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+       if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
                tmp_cr12 = 3;
 
        if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
@@ -1062,7 +1078,7 @@ static void uli526x_timer(unsigned long data)
                /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
                /* AUTO don't need */
                if ( !(db->media_mode & 0x8) )
-                       phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+                       phy->write(db, db->phy_addr, 0, 0x1000);
 
                /* AUTO mode, if INT phyxcer link failed, select EXT device */
                if (db->media_mode & ULI526X_AUTO) {
@@ -1119,12 +1135,13 @@ static void uli526x_timer(unsigned long data)
 static void uli526x_reset_prepare(struct net_device *dev)
 {
        struct uli526x_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
 
        /* Sopt MAC controller */
        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
-       update_cr6(db->cr6_data, dev->base_addr);
-       outl(0, dev->base_addr + DCR7);         /* Disable Interrupt */
-       outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+       update_cr6(db->cr6_data, ioaddr);
+       uw32(DCR7, 0);                          /* Disable Interrupt */
+       uw32(DCR5, ur32(DCR5));
 
        /* Disable upper layer interface */
        netif_stop_queue(dev);
@@ -1289,7 +1306,7 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
  *     Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
+static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
 {
        struct uli526x_board_info *db = netdev_priv(dev);
        struct tx_desc *tmp_tx;
@@ -1304,14 +1321,14 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
        /* tx descriptor start pointer */
        db->tx_insert_ptr = db->first_tx_desc;
        db->tx_remove_ptr = db->first_tx_desc;
-       outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+       uw32(DCR4, db->first_tx_desc_dma);      /* TX DESC address */
 
        /* rx descriptor start pointer */
        db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
        db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
        db->rx_insert_ptr = db->first_rx_desc;
        db->rx_ready_ptr = db->first_rx_desc;
-       outl(db->first_rx_desc_dma, ioaddr + DCR3);     /* RX DESC address */
+       uw32(DCR3, db->first_rx_desc_dma);      /* RX DESC address */
 
        /* Init Transmit chain */
        tmp_buf = db->buf_pool_start;
@@ -1352,11 +1369,9 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
  *     Update CR6 value
  *     Firstly stop ULI526X, then written value and start
  */
-
-static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
 {
-
-       outl(cr6_data, ioaddr + DCR6);
+       uw32(DCR6, cr6_data);
        udelay(5);
 }
 
@@ -1375,6 +1390,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
 static void send_filter_frame(struct net_device *dev, int mc_cnt)
 {
        struct uli526x_board_info *db = netdev_priv(dev);
+       void __iomem *ioaddr = db->ioaddr;
        struct netdev_hw_addr *ha;
        struct tx_desc *txptr;
        u16 * addrptr;
@@ -1420,9 +1436,9 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
                /* Resource Empty */
                db->tx_packet_cnt++;
                txptr->tdes0 = cpu_to_le32(0x80000000);
-               update_cr6(db->cr6_data | 0x2000, dev->base_addr);
-               outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
-               update_cr6(db->cr6_data, dev->base_addr);
+               update_cr6(db->cr6_data | 0x2000, ioaddr);
+               uw32(DCR1, 0x1);        /* Issue Tx polling */
+               update_cr6(db->cr6_data, ioaddr);
                dev->trans_start = jiffies;
        } else
                netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
@@ -1465,37 +1481,38 @@ static void allocate_rx_buffer(struct net_device *dev)
  *     Read one word data from the serial ROM
  */
 
-static u16 read_srom_word(long ioaddr, int offset)
+static u16 read_srom_word(struct uli526x_board_info *db, int offset)
 {
-       int i;
+       void __iomem *ioaddr = db->ioaddr;
        u16 srom_data = 0;
-       long cr9_ioaddr = ioaddr + DCR9;
+       int i;
 
-       outl(CR9_SROM_READ, cr9_ioaddr);
-       outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+       uw32(DCR9, CR9_SROM_READ);
+       uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
        /* Send the Read Command 110b */
-       SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-       SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-       SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+       srom_clk_write(db, SROM_DATA_1);
+       srom_clk_write(db, SROM_DATA_1);
+       srom_clk_write(db, SROM_DATA_0);
 
        /* Send the offset */
        for (i = 5; i >= 0; i--) {
                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
-               SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+               srom_clk_write(db, srom_data);
        }
 
-       outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+       uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
        for (i = 16; i > 0; i--) {
-               outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+               uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
                udelay(5);
-               srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
-               outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+               srom_data = (srom_data << 1) |
+                           ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
+               uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
                udelay(5);
        }
 
-       outl(CR9_SROM_READ, cr9_ioaddr);
+       uw32(DCR9, CR9_SROM_READ);
        return srom_data;
 }
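
read_srom_word() bit-bangs a 93C46-style serial EEPROM through CR9: select the chip, clock out the 3-bit read opcode (1-1-0), clock out a 6-bit word address MSB first, then clock 16 data bits back in. A compact user-space model of just the bit framing, with the chip-select/clock handshaking left out and the SROM contents made up:

#include <stdint.h>
#include <stdio.h>

static const uint16_t fake_srom[64] = { [0] = 0x4c49, [10] = 0xbeef };

/* Mirrors the two shift loops above: address bits go out MSB first,
 * data bits come back MSB first. */
static uint16_t srom_read_word(int offset)
{
        uint16_t addr = 0, data = 0;
        int i;

        for (i = 5; i >= 0; i--)                /* 6 address bits */
                addr = (uint16_t)((addr << 1) | ((offset >> i) & 1));

        for (i = 16; i > 0; i--)                /* 16 data bits */
                data = (uint16_t)((data << 1) |
                                  ((fake_srom[addr] >> (i - 1)) & 1));
        return data;
}

int main(void)
{
        printf("word 0  = %#06x\n", (unsigned)srom_read_word(0));
        printf("word 10 = %#06x\n", (unsigned)srom_read_word(10));
        return 0;
}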
 
@@ -1506,15 +1523,16 @@ static u16 read_srom_word(long ioaddr, int offset)
 
 static u8 uli526x_sense_speed(struct uli526x_board_info * db)
 {
+       struct uli_phy_ops *phy = &db->phy;
        u8 ErrFlag = 0;
        u16 phy_mode;
 
-       phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
-       phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+       phy_mode = phy->read(db, db->phy_addr, 1);
+       phy_mode = phy->read(db, db->phy_addr, 1);
 
        if ( (phy_mode & 0x24) == 0x24 ) {
 
-               phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+               phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
                if(phy_mode&0x8000)
                        phy_mode = 0x8000;
                else if(phy_mode&0x4000)
@@ -1549,10 +1567,11 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
 
 static void uli526x_set_phyxcer(struct uli526x_board_info *db)
 {
+       struct uli_phy_ops *phy = &db->phy;
        u16 phy_reg;
 
        /* Phyxcer capability setting */
-       phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+       phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
 
        if (db->media_mode & ULI526X_AUTO) {
                /* AUTO Mode */
@@ -1573,10 +1592,10 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
                phy_reg|=db->PHY_reg4;
                db->media_mode|=ULI526X_AUTO;
        }
-       phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+       phy->write(db, db->phy_addr, 4, phy_reg);
 
        /* Restart Auto-Negotiation */
-       phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+       phy->write(db, db->phy_addr, 0, 0x1200);
        udelay(50);
 }
 
@@ -1590,6 +1609,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
 
 static void uli526x_process_mode(struct uli526x_board_info *db)
 {
+       struct uli_phy_ops *phy = &db->phy;
        u16 phy_reg;
 
        /* Full Duplex Mode Check */
@@ -1601,10 +1621,10 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
        update_cr6(db->cr6_data, db->ioaddr);
 
        /* 10/100M phyxcer force mode need */
-       if ( !(db->media_mode & 0x8)) {
+       if (!(db->media_mode & 0x8)) {
                /* Forece Mode */
-               phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
-               if ( !(phy_reg & 0x1) ) {
+               phy_reg = phy->read(db, db->phy_addr, 6);
+               if (!(phy_reg & 0x1)) {
                        /* parter without N-Way capability */
                        phy_reg = 0x0;
                        switch(db->op_mode) {
@@ -1613,148 +1633,126 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
                        case ULI526X_100MHF: phy_reg = 0x2000; break;
                        case ULI526X_100MFD: phy_reg = 0x2100; break;
                        }
-                       phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+                       phy->write(db, db->phy_addr, 0, phy_reg);
                }
        }
 }
 
 
-/*
- *     Write a word to Phy register
- */
-
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+/* M5261/M5263 Chip */
+static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
+                           u8 offset, u16 phy_data)
 {
        u16 i;
-       unsigned long ioaddr;
-
-       if(chip_id == PCI_ULI5263_ID)
-       {
-               phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
-               return;
-       }
-       /* M5261/M5263 Chip */
-       ioaddr = iobase + DCR9;
 
        /* Send 33 synchronization clock to Phy controller */
        for (i = 0; i < 35; i++)
-               phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+               phy_write_1bit(db, PHY_DATA_1);
 
        /* Send start command(01) to Phy */
-       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+       phy_write_1bit(db, PHY_DATA_0);
+       phy_write_1bit(db, PHY_DATA_1);
 
        /* Send write command(01) to Phy */
-       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+       phy_write_1bit(db, PHY_DATA_0);
+       phy_write_1bit(db, PHY_DATA_1);
 
        /* Send Phy address */
        for (i = 0x10; i > 0; i = i >> 1)
-               phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+               phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
        /* Send register address */
        for (i = 0x10; i > 0; i = i >> 1)
-               phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+               phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
        /* written trasnition */
-       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
-       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+       phy_write_1bit(db, PHY_DATA_1);
+       phy_write_1bit(db, PHY_DATA_0);
 
        /* Write a word data to PHY controller */
-       for ( i = 0x8000; i > 0; i >>= 1)
-               phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
-
+       for (i = 0x8000; i > 0; i >>= 1)
+               phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
 }
 
-
-/*
- *     Read a word data from phy register
- */
-
-static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
 {
-       int i;
        u16 phy_data;
-       unsigned long ioaddr;
-
-       if(chip_id == PCI_ULI5263_ID)
-               return phy_readby_cr10(iobase, phy_addr, offset);
-       /* M5261/M5263 Chip */
-       ioaddr = iobase + DCR9;
+       int i;
 
        /* Send 33 synchronization clock to Phy controller */
        for (i = 0; i < 35; i++)
-               phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+               phy_write_1bit(db, PHY_DATA_1);
 
        /* Send start command(01) to Phy */
-       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+       phy_write_1bit(db, PHY_DATA_0);
+       phy_write_1bit(db, PHY_DATA_1);
 
        /* Send read command(10) to Phy */
-       phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
-       phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+       phy_write_1bit(db, PHY_DATA_1);
+       phy_write_1bit(db, PHY_DATA_0);
 
        /* Send Phy address */
        for (i = 0x10; i > 0; i = i >> 1)
-               phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+               phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
        /* Send register address */
        for (i = 0x10; i > 0; i = i >> 1)
-               phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+               phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
        /* Skip transition state */
-       phy_read_1bit(ioaddr, chip_id);
+       phy_read_1bit(db);
 
        /* read 16bit data */
        for (phy_data = 0, i = 0; i < 16; i++) {
                phy_data <<= 1;
-               phy_data |= phy_read_1bit(ioaddr, chip_id);
+               phy_data |= phy_read_1bit(db);
        }
 
        return phy_data;
 }
 
-static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
+                          u8 offset)
 {
-       unsigned long ioaddr,cr10_value;
+       void __iomem *ioaddr = db->ioaddr;
+       u32 cr10_value = phy_addr;
 
-       ioaddr = iobase + DCR10;
-       cr10_value = phy_addr;
-       cr10_value = (cr10_value<<5) + offset;
-       cr10_value = (cr10_value<<16) + 0x08000000;
-       outl(cr10_value,ioaddr);
+       cr10_value = (cr10_value <<  5) + offset;
+       cr10_value = (cr10_value << 16) + 0x08000000;
+       uw32(DCR10, cr10_value);
        udelay(1);
-       while(1)
-       {
-               cr10_value = inl(ioaddr);
-               if(cr10_value&0x10000000)
+       while (1) {
+               cr10_value = ur32(DCR10);
+               if (cr10_value & 0x10000000)
                        break;
        }
        return cr10_value & 0x0ffff;
 }
 
-static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
+                            u8 offset, u16 phy_data)
 {
-       unsigned long ioaddr,cr10_value;
+       void __iomem *ioaddr = db->ioaddr;
+       u32 cr10_value = phy_addr;
 
-       ioaddr = iobase + DCR10;
-       cr10_value = phy_addr;
-       cr10_value = (cr10_value<<5) + offset;
-       cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
-       outl(cr10_value,ioaddr);
+       cr10_value = (cr10_value <<  5) + offset;
+       cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
+       uw32(DCR10, cr10_value);
        udelay(1);
 }
 /*
  *     Write one bit data to Phy Controller
  */
 
-static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
 {
-       outl(phy_data , ioaddr);                        /* MII Clock Low */
+       void __iomem *ioaddr = db->ioaddr;
+
+       uw32(DCR9, data);               /* MII Clock Low */
        udelay(1);
-       outl(phy_data  | MDCLKH, ioaddr);       /* MII Clock High */
+       uw32(DCR9, data | MDCLKH);      /* MII Clock High */
        udelay(1);
-       outl(phy_data , ioaddr);                        /* MII Clock Low */
+       uw32(DCR9, data);               /* MII Clock Low */
        udelay(1);
 }
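
The CR9 helpers above bit-bang a standard IEEE 802.3 clause-22 management frame on the MDIO pins (preamble, start 01, a 2-bit opcode, 5-bit PHY address, 5-bit register address, turnaround, 16 data bits), whereas the CR10 variants hand one 32-bit value to the chip and let it do the serialization. A small sketch that assembles the generic clause-22 write frame as a 32-bit word, the same information both paths convey; note this is the generic MDIO layout, not the ULi CR10 register encoding:

#include <stdint.h>
#include <stdio.h>

/* Clause-22 MDIO write frame: ST=01, OP=01 (write), 5-bit PHY address,
 * 5-bit register, TA=10, then 16 bits of data. */
static uint32_t mdio_c22_write_frame(uint8_t phy, uint8_t reg, uint16_t val)
{
        return (1u << 30) |                     /* ST = 01 */
               (1u << 28) |                     /* OP = 01, write */
               ((uint32_t)(phy & 0x1f) << 23) |
               ((uint32_t)(reg & 0x1f) << 18) |
               (2u << 16) |                     /* TA = 10 */
               val;
}

int main(void)
{
        /* e.g. restart autonegotiation: BMCR (reg 0) = 0x1200 on PHY 1 */
        uint32_t frame = mdio_c22_write_frame(1, 0, 0x1200);
        int i;

        printf("frame = %#010x, shifted out MSB first:\n", (unsigned)frame);
        for (i = 31; i >= 0; i--)               /* what the CR9 loop clocks out */
                putchar('0' + (int)((frame >> i) & 1));
        putchar('\n');
        return 0;
}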
 
@@ -1763,14 +1761,15 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
  *     Read one bit phy data from PHY controller
  */
 
-static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+static u16 phy_read_1bit(struct uli526x_board_info *db)
 {
+       void __iomem *ioaddr = db->ioaddr;
        u16 phy_data;
 
-       outl(0x50000 , ioaddr);
+       uw32(DCR9, 0x50000);
        udelay(1);
-       phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
-       outl(0x40000 , ioaddr);
+       phy_data = (ur32(DCR9) >> 19) & 0x1;
+       uw32(DCR9, 0x40000);
        udelay(1);
 
        return phy_data;
drivers/net/ethernet/dec/tulip/winbond-840.c
index 2ac6fff0363a7172dbc218f28b62ccd6c8f3868a..4d1ffca83c82ffabf3e3f602fa6e30075a786e21 100644 (file)
@@ -400,9 +400,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
           No hold time required! */
        iowrite32(0x00000001, ioaddr + PCIBusCfg);
 
-       dev->base_addr = (unsigned long)ioaddr;
-       dev->irq = irq;
-
        np = netdev_priv(dev);
        np->pci_dev = pdev;
        np->chip_id = chip_idx;
@@ -635,17 +632,18 @@ static int netdev_open(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
+       const int irq = np->pci_dev->irq;
        int i;
 
        iowrite32(0x00000001, ioaddr + PCIBusCfg);              /* Reset */
 
        netif_device_detach(dev);
-       i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+       i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (i)
                goto out_err;
 
        if (debug > 1)
-               netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
+               netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
 
        if((i=alloc_ringdesc(dev)))
                goto out_err;
@@ -932,6 +930,7 @@ static void tx_timeout(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
+       const int irq = np->pci_dev->irq;
 
        dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
                 ioread32(ioaddr + IntrStatus));
@@ -951,7 +950,7 @@ static void tx_timeout(struct net_device *dev)
               np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
        printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
 
-       disable_irq(dev->irq);
+       disable_irq(irq);
        spin_lock_irq(&np->lock);
        /*
         * Under high load dirty_tx and the internal tx descriptor pointer
@@ -966,7 +965,7 @@ static void tx_timeout(struct net_device *dev)
        init_rxtx_rings(dev);
        init_registers(dev);
        spin_unlock_irq(&np->lock);
-       enable_irq(dev->irq);
+       enable_irq(irq);
 
        netif_wake_queue(dev);
        dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1500,7 +1499,7 @@ static int netdev_close(struct net_device *dev)
        iowrite32(0x0000, ioaddr + IntrEnable);
        spin_unlock_irq(&np->lock);
 
-       free_irq(dev->irq, dev);
+       free_irq(np->pci_dev->irq, dev);
        wmb();
        netif_device_attach(dev);
 
@@ -1589,7 +1588,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
                iowrite32(0, ioaddr + IntrEnable);
                spin_unlock_irq(&np->lock);
 
-               synchronize_irq(dev->irq);
+               synchronize_irq(np->pci_dev->irq);
                netif_tx_disable(dev);
 
                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
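
The suspend path keeps the same quiesce ordering while switching the IRQ source: mask the chip's interrupts under the lock, let synchronize_irq() wait out any handler already running on another CPU, and only then stop the transmit queue. Restated as a hedged, comment-heavy fragment (np, ioaddr and IntrEnable follow the w840 names, but this is illustrative, not a drop-in):

        spin_lock_irq(&np->lock);
        iowrite32(0, ioaddr + IntrEnable);      /* chip raises no new interrupts */
        spin_unlock_irq(&np->lock);

        synchronize_irq(np->pci_dev->irq);      /* a handler may still be in flight */
        netif_tx_disable(dev);                  /* and no new transmits after this */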
drivers/net/ethernet/dec/tulip/xircom_cb.c
index fdb329fe6e8ea8564bb7afb5a5718388ca314164..138bf83bc98e346c66b2c90ff691512e49f539a8 100644 (file)
@@ -41,7 +41,9 @@ MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
 MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
 MODULE_LICENSE("GPL");
 
-
+#define xw32(reg, val) iowrite32(val, ioaddr + (reg))
+#define xr32(reg)      ioread32(ioaddr + (reg))
+#define xr8(reg)       ioread8(ioaddr + (reg))
 
 /* IO registers on the card, offsets */
 #define CSR0   0x00
@@ -83,7 +85,7 @@ struct xircom_private {
 
        struct sk_buff *tx_skb[4];
 
-       unsigned long io_port;
+       void __iomem *ioaddr;
        int open;
 
        /* transmit_used is the rotating counter that indicates which transmit
@@ -137,7 +139,7 @@ static int link_status(struct xircom_private *card);
 
 
 static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
-       {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
+       { PCI_VDEVICE(XIRCOM, 0x0003), },
        {0,},
 };
 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
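
PCI_VDEVICE(XIRCOM, 0x0003) is shorthand for a pci_device_id entry whose vendor comes from PCI_VENDOR_ID_XIRCOM (0x115d), whose device is 0x0003, and whose subvendor/subdevice are wildcarded, so it matches exactly what the removed open-coded line matched. A rough kernel-style sketch of the expansion (hypothetical table name; class, class_mask and driver_data are left at zero, just as the macro leaves them):

static DEFINE_PCI_DEVICE_TABLE(my_pci_table) = {
        { .vendor    = PCI_VENDOR_ID_XIRCOM,    /* 0x115d */
          .device    = 0x0003,
          .subvendor = PCI_ANY_ID,
          .subdevice = PCI_ANY_ID },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, my_pci_table);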
@@ -146,9 +148,7 @@ static struct pci_driver xircom_ops = {
        .name           = "xircom_cb",
        .id_table       = xircom_pci_table,
        .probe          = xircom_probe,
-       .remove         = xircom_remove,
-       .suspend =NULL,
-       .resume =NULL
+       .remove         = __devexit_p(xircom_remove),
 };
 
 
@@ -192,15 +192,18 @@ static const struct net_device_ops netdev_ops = {
  */
 static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       struct device *d = &pdev->dev;
        struct net_device *dev = NULL;
        struct xircom_private *private;
        unsigned long flags;
        unsigned short tmp16;
+       int rc;
 
        /* First do the PCI initialisation */
 
-       if (pci_enable_device(pdev))
-               return -ENODEV;
+       rc = pci_enable_device(pdev);
+       if (rc < 0)
+               goto out;
 
        /* disable all powermanagement */
        pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
@@ -211,11 +214,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
        pci_read_config_word (pdev,PCI_STATUS, &tmp16);
        pci_write_config_word (pdev, PCI_STATUS,tmp16);
 
-       if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
+       rc = pci_request_regions(pdev, "xircom_cb");
+       if (rc < 0) {
                pr_err("%s: failed to allocate io-region\n", __func__);
-               return -ENODEV;
+               goto err_disable;
        }
 
+       rc = -ENOMEM;
        /*
           Before changing the hardware, allocate the memory.
           This way, we can fail gracefully if not enough memory
@@ -223,17 +228,21 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
         */
        dev = alloc_etherdev(sizeof(struct xircom_private));
        if (!dev)
-               goto device_fail;
+               goto err_release;
 
        private = netdev_priv(dev);
 
        /* Allocate the send/receive buffers */
-       private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
+       private->rx_buffer = dma_alloc_coherent(d, 8192,
+                                               &private->rx_dma_handle,
+                                               GFP_KERNEL);
        if (private->rx_buffer == NULL) {
                pr_err("%s: no memory for rx buffer\n", __func__);
                goto rx_buf_fail;
        }
-       private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
+       private->tx_buffer = dma_alloc_coherent(d, 8192,
+                                               &private->tx_dma_handle,
+                                               GFP_KERNEL);
        if (private->tx_buffer == NULL) {
                pr_err("%s: no memory for tx buffer\n", __func__);
                goto tx_buf_fail;
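
The buffer allocations move from the legacy pci_alloc_consistent() wrapper to dma_alloc_coherent() against &pdev->dev with an explicit GFP_KERNEL; the discipline that matters is that every such allocation is paired with a dma_free_coherent() using the same device, size and DMA handle, both in the probe error path further down and in xircom_remove(). A short kernel-style fragment restating that pairing (names follow the driver, but treat it as a sketch rather than a drop-in):

        /* allocate */
        private->rx_buffer = dma_alloc_coherent(&pdev->dev, 8192,
                                                &private->rx_dma_handle,
                                                GFP_KERNEL);
        if (!private->rx_buffer)
                goto rx_buf_fail;       /* unwind, do not leak earlier resources */

        /* ... and on every exit path, free with the same device/size/handle ... */
        dma_free_coherent(&pdev->dev, 8192, private->rx_buffer,
                          private->rx_dma_handle);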
@@ -244,10 +253,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
 
        private->dev = dev;
        private->pdev = pdev;
-       private->io_port = pci_resource_start(pdev, 0);
+
+       /* IO range. */
+       private->ioaddr = pci_iomap(pdev, 0, 0);
+       if (!private->ioaddr)
+               goto reg_fail;
+
        spin_lock_init(&private->lock);
-       dev->irq = pdev->irq;
-       dev->base_addr = private->io_port;
 
        initialize_card(private);
        read_mac_address(private);
@@ -256,9 +268,10 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
        dev->netdev_ops = &netdev_ops;
        pci_set_drvdata(pdev, dev);
 
-       if (register_netdev(dev)) {
+       rc = register_netdev(dev);
+       if (rc < 0) {
                pr_err("%s: netdevice registration failed\n", __func__);
-               goto reg_fail;
+               goto err_unmap;
        }
 
        netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
@@ -273,17 +286,23 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
        spin_unlock_irqrestore(&private->lock,flags);
 
        trigger_receive(private);
+out:
+       return rc;
 
-       return 0;
-
+err_unmap:
+       pci_iounmap(pdev, private->ioaddr);
 reg_fail:
-       kfree(private->tx_buffer);
+       pci_set_drvdata(pdev, NULL);
+       dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
 tx_buf_fail:
-       kfree(private->rx_buffer);
+       dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
 rx_buf_fail:
        free_netdev(dev);
-device_fail:
-       return -ENODEV;
+err_release:
+       pci_release_regions(pdev);
+err_disable:
+       pci_disable_device(pdev);
+       goto out;
 }
 
 
@@ -297,25 +316,28 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct xircom_private *card = netdev_priv(dev);
+       struct device *d = &pdev->dev;
 
-       pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
-       pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
-
-       release_region(dev->base_addr, 128);
        unregister_netdev(dev);
-       free_netdev(dev);
+       pci_iounmap(pdev, card->ioaddr);
        pci_set_drvdata(pdev, NULL);
+       dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
+       dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
+       free_netdev(dev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
 }
 
 static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
 {
        struct net_device *dev = (struct net_device *) dev_instance;
        struct xircom_private *card = netdev_priv(dev);
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int status;
        int i;
 
        spin_lock(&card->lock);
-       status = inl(card->io_port+CSR5);
+       status = xr32(CSR5);
 
 #if defined DEBUG && DEBUG > 1
        print_binary(status);
@@ -345,7 +367,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
        /* Clear all remaining interrupts */
        status |= 0xffffffff; /* FIXME: make this clear only the
                                        real existing bits */
-       outl(status,card->io_port+CSR5);
+       xw32(CSR5, status);
 
 
        for (i=0;i<NUMDESCRIPTORS;i++)
@@ -423,11 +445,11 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
 static int xircom_open(struct net_device *dev)
 {
        struct xircom_private *xp = netdev_priv(dev);
+       const int irq = xp->pdev->irq;
        int retval;
 
-       netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
-                   dev->irq);
-       retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
+       netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq);
+       retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;
 
@@ -459,7 +481,7 @@ static int xircom_close(struct net_device *dev)
        spin_unlock_irqrestore(&card->lock,flags);
 
        card->open = 0;
-       free_irq(dev->irq,dev);
+       free_irq(card->pdev->irq, dev);
 
        return 0;
 
@@ -469,35 +491,39 @@ static int xircom_close(struct net_device *dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       xircom_interrupt(dev->irq, dev);
-       enable_irq(dev->irq);
+       struct xircom_private *xp = netdev_priv(dev);
+       const int irq = xp->pdev->irq;
+
+       disable_irq(irq);
+       xircom_interrupt(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
 
 static void initialize_card(struct xircom_private *card)
 {
-       unsigned int val;
+       void __iomem *ioaddr = card->ioaddr;
        unsigned long flags;
+       u32 val;
 
        spin_lock_irqsave(&card->lock, flags);
 
        /* First: reset the card */
-       val = inl(card->io_port + CSR0);
+       val = xr32(CSR0);
        val |= 0x01;            /* Software reset */
-       outl(val, card->io_port + CSR0);
+       xw32(CSR0, val);
 
        udelay(100);            /* give the card some time to reset */
 
-       val = inl(card->io_port + CSR0);
+       val = xr32(CSR0);
        val &= ~0x01;           /* disable Software reset */
-       outl(val, card->io_port + CSR0);
+       xw32(CSR0, val);
 
 
        val = 0;                /* Value 0x00 is a safe and conservative value
                                   for the PCI configuration settings */
-       outl(val, card->io_port + CSR0);
+       xw32(CSR0, val);
 
 
        disable_all_interrupts(card);
@@ -515,10 +541,9 @@ ignored; I chose zero.
 */
 static void trigger_transmit(struct xircom_private *card)
 {
-       unsigned int val;
+       void __iomem *ioaddr = card->ioaddr;
 
-       val = 0;
-       outl(val, card->io_port + CSR1);
+       xw32(CSR1, 0);
 }
 
 /*
@@ -530,10 +555,9 @@ ignored; I chose zero.
 */
 static void trigger_receive(struct xircom_private *card)
 {
-       unsigned int val;
+       void __iomem *ioaddr = card->ioaddr;
 
-       val = 0;
-       outl(val, card->io_port + CSR2);
+       xw32(CSR2, 0);
 }
 
 /*
@@ -542,6 +566,7 @@ descriptors and programs the addresses into the card.
 */
 static void setup_descriptors(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        u32 address;
        int i;
 
@@ -571,7 +596,7 @@ static void setup_descriptors(struct xircom_private *card)
        wmb();
        /* Write the receive descriptor ring address to the card */
        address = card->rx_dma_handle;
-       outl(address, card->io_port + CSR3);    /* Receive descr list address */
+       xw32(CSR3, address);    /* Receive descr list address */
 
 
        /* transmit descriptors */
@@ -596,7 +621,7 @@ static void setup_descriptors(struct xircom_private *card)
        wmb();
        /* wite the transmit descriptor ring to the card */
        address = card->tx_dma_handle;
-       outl(address, card->io_port + CSR4);    /* xmit descr list address */
+       xw32(CSR4, address);    /* xmit descr list address */
 }
 
 /*
@@ -605,11 +630,12 @@ valid by setting the address in the card to 0x00.
 */
 static void remove_descriptors(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
        val = 0;
-       outl(val, card->io_port + CSR3);        /* Receive descriptor address */
-       outl(val, card->io_port + CSR4);        /* Send descriptor address */
+       xw32(CSR3, val);        /* Receive descriptor address */
+       xw32(CSR4, val);        /* Send descriptor address */
 }
 
 /*
@@ -620,17 +646,17 @@ This function also clears the status-bit.
 */
 static int link_status_changed(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
-       val = inl(card->io_port + CSR5);        /* Status register */
-
-       if ((val & (1 << 27)) == 0)             /* no change */
+       val = xr32(CSR5);       /* Status register */
+       if (!(val & (1 << 27))) /* no change */
                return 0;
 
        /* clear the event by writing a 1 to the bit in the
           status register. */
        val = (1 << 27);
-       outl(val, card->io_port + CSR5);
+       xw32(CSR5, val);
 
        return 1;
 }
@@ -642,11 +668,9 @@ in a non-stopped state.
 */
 static int transmit_active(struct xircom_private *card)
 {
-       unsigned int val;
-
-       val = inl(card->io_port + CSR5);        /* Status register */
+       void __iomem *ioaddr = card->ioaddr;
 
-       if ((val & (7 << 20)) == 0)             /* transmitter disabled */
+       if (!(xr32(CSR5) & (7 << 20)))  /* transmitter disabled */
                return 0;
 
        return 1;
@@ -658,11 +682,9 @@ in a non-stopped state.
 */
 static int receive_active(struct xircom_private *card)
 {
-       unsigned int val;
-
-       val = inl(card->io_port + CSR5);        /* Status register */
+       void __iomem *ioaddr = card->ioaddr;
 
-       if ((val & (7 << 17)) == 0)             /* receiver disabled */
+       if (!(xr32(CSR5) & (7 << 17)))  /* receiver disabled */
                return 0;
 
        return 1;
@@ -680,10 +702,11 @@ must be called with the lock held and interrupts disabled.
 */
 static void activate_receiver(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
        int counter;
 
-       val = inl(card->io_port + CSR6);        /* Operation mode */
+       val = xr32(CSR6);       /* Operation mode */
 
        /* If the "active" bit is set and the receiver is already
           active, no need to do the expensive thing */
@@ -692,7 +715,7 @@ static void activate_receiver(struct xircom_private *card)
 
 
        val = val & ~2;         /* disable the receiver */
-       outl(val, card->io_port + CSR6);
+       xw32(CSR6, val);
 
        counter = 10;
        while (counter > 0) {
@@ -706,9 +729,9 @@ static void activate_receiver(struct xircom_private *card)
        }
 
        /* enable the receiver */
-       val = inl(card->io_port + CSR6);        /* Operation mode */
-       val = val | 2;                          /* enable the receiver */
-       outl(val, card->io_port + CSR6);
+       val = xr32(CSR6);       /* Operation mode */
+       val = val | 2;          /* enable the receiver */
+       xw32(CSR6, val);
 
        /* now wait for the card to activate again */
        counter = 10;
@@ -733,12 +756,13 @@ must be called with the lock held and interrupts disabled.
 */
 static void deactivate_receiver(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
        int counter;
 
-       val = inl(card->io_port + CSR6);        /* Operation mode */
-       val = val & ~2;                         /* disable the receiver */
-       outl(val, card->io_port + CSR6);
+       val = xr32(CSR6);       /* Operation mode */
+       val = val & ~2;         /* disable the receiver */
+       xw32(CSR6, val);
 
        counter = 10;
        while (counter > 0) {
@@ -765,10 +789,11 @@ must be called with the lock held and interrupts disabled.
 */
 static void activate_transmitter(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
        int counter;
 
-       val = inl(card->io_port + CSR6);        /* Operation mode */
+       val = xr32(CSR6);       /* Operation mode */
 
        /* If the "active" bit is set and the receiver is already
           active, no need to do the expensive thing */
@@ -776,7 +801,7 @@ static void activate_transmitter(struct xircom_private *card)
                return;
 
        val = val & ~(1 << 13); /* disable the transmitter */
-       outl(val, card->io_port + CSR6);
+       xw32(CSR6, val);
 
        counter = 10;
        while (counter > 0) {
@@ -791,9 +816,9 @@ static void activate_transmitter(struct xircom_private *card)
        }
 
        /* enable the transmitter */
-       val = inl(card->io_port + CSR6);        /* Operation mode */
+       val = xr32(CSR6);       /* Operation mode */
        val = val | (1 << 13);  /* enable the transmitter */
-       outl(val, card->io_port + CSR6);
+       xw32(CSR6, val);
 
        /* now wait for the card to activate again */
        counter = 10;
@@ -818,12 +843,13 @@ must be called with the lock held and interrupts disabled.
 */
 static void deactivate_transmitter(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
        int counter;
 
-       val = inl(card->io_port + CSR6);        /* Operation mode */
+       val = xr32(CSR6);       /* Operation mode */
        val = val & ~2;         /* disable the transmitter */
-       outl(val, card->io_port + CSR6);
+       xw32(CSR6, val);
 
        counter = 20;
        while (counter > 0) {
@@ -846,11 +872,12 @@ must be called with the lock held and interrupts disabled.
 */
 static void enable_transmit_interrupt(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
-       val = inl(card->io_port + CSR7);        /* Interrupt enable register */
-       val |= 1;                               /* enable the transmit interrupt */
-       outl(val, card->io_port + CSR7);
+       val = xr32(CSR7);       /* Interrupt enable register */
+       val |= 1;               /* enable the transmit interrupt */
+       xw32(CSR7, val);
 }
 
 
@@ -861,11 +888,12 @@ must be called with the lock held and interrupts disabled.
 */
 static void enable_receive_interrupt(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
-       val = inl(card->io_port + CSR7);        /* Interrupt enable register */
-       val = val | (1 << 6);                   /* enable the receive interrupt */
-       outl(val, card->io_port + CSR7);
+       val = xr32(CSR7);       /* Interrupt enable register */
+       val = val | (1 << 6);   /* enable the receive interrupt */
+       xw32(CSR7, val);
 }
 
 /*
@@ -875,11 +903,12 @@ must be called with the lock held and interrupts disabled.
 */
 static void enable_link_interrupt(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
-       val = inl(card->io_port + CSR7);        /* Interrupt enable register */
-       val = val | (1 << 27);                  /* enable the link status chage interrupt */
-       outl(val, card->io_port + CSR7);
+       val = xr32(CSR7);       /* Interrupt enable register */
+       val = val | (1 << 27);  /* enable the link status change interrupt */
+       xw32(CSR7, val);
 }
 
 
@@ -891,10 +920,9 @@ must be called with the lock held and interrupts disabled.
 */
 static void disable_all_interrupts(struct xircom_private *card)
 {
-       unsigned int val;
+       void __iomem *ioaddr = card->ioaddr;
 
-       val = 0;                                /* disable all interrupts */
-       outl(val, card->io_port + CSR7);
+       xw32(CSR7, 0);
 }
 
 /*
@@ -904,9 +932,10 @@ must be called with the lock held and interrupts disabled.
 */
 static void enable_common_interrupts(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
-       val = inl(card->io_port + CSR7);        /* Interrupt enable register */
+       val = xr32(CSR7);       /* Interrupt enable register */
        val |= (1<<16); /* Normal Interrupt Summary */
        val |= (1<<15); /* Abnormal Interrupt Summary */
        val |= (1<<13); /* Fatal bus error */
@@ -915,7 +944,7 @@ static void enable_common_interrupts(struct xircom_private *card)
        val |= (1<<5);  /* Transmit Underflow */
        val |= (1<<2);  /* Transmit Buffer Unavailable */
        val |= (1<<1);  /* Transmit Process Stopped */
-       outl(val, card->io_port + CSR7);
+       xw32(CSR7, val);
 }
 
 /*
@@ -925,11 +954,12 @@ must be called with the lock held and interrupts disabled.
 */
 static int enable_promisc(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned int val;
 
-       val = inl(card->io_port + CSR6);
+       val = xr32(CSR6);
        val = val | (1 << 6);
-       outl(val, card->io_port + CSR6);
+       xw32(CSR6, val);
 
        return 1;
 }
@@ -944,13 +974,16 @@ Must be called in locked state with interrupts disabled
 */
 static int link_status(struct xircom_private *card)
 {
-       unsigned int val;
+       void __iomem *ioaddr = card->ioaddr;
+       u8 val;
 
-       val = inb(card->io_port + CSR12);
+       val = xr8(CSR12);
 
-       if (!(val&(1<<2)))  /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
+       /* bit 2 is 0 for 10mbit link, 1 for not a 10mbit link */
+       if (!(val & (1 << 2)))
                return 10;
-       if (!(val&(1<<1)))  /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
+       /* bit 1 is 0 for 100mbit link, 1 for not a 100mbit link */
+       if (!(val & (1 << 1)))
                return 100;
 
        /* If we get here -> no link at all */
@@ -969,29 +1002,31 @@ static int link_status(struct xircom_private *card)
  */
 static void read_mac_address(struct xircom_private *card)
 {
-       unsigned char j, tuple, link, data_id, data_count;
+       void __iomem *ioaddr = card->ioaddr;
        unsigned long flags;
+       u8 link;
        int i;
 
        spin_lock_irqsave(&card->lock, flags);
 
-       outl(1 << 12, card->io_port + CSR9);    /* enable boot rom access */
+       xw32(CSR9, 1 << 12);    /* enable boot rom access */
        for (i = 0x100; i < 0x1f7; i += link + 2) {
-               outl(i, card->io_port + CSR10);
-               tuple = inl(card->io_port + CSR9) & 0xff;
-               outl(i + 1, card->io_port + CSR10);
-               link = inl(card->io_port + CSR9) & 0xff;
-               outl(i + 2, card->io_port + CSR10);
-               data_id = inl(card->io_port + CSR9) & 0xff;
-               outl(i + 3, card->io_port + CSR10);
-               data_count = inl(card->io_port + CSR9) & 0xff;
+               u8 tuple, data_id, data_count;
+
+               xw32(CSR10, i);
+               tuple = xr32(CSR9);
+               xw32(CSR10, i + 1);
+               link = xr32(CSR9);
+               xw32(CSR10, i + 2);
+               data_id = xr32(CSR9);
+               xw32(CSR10, i + 3);
+               data_count = xr32(CSR9);
                if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
-                       /*
-                        * This is it.  We have the data we want.
-                        */
+                       int j;
+
                        for (j = 0; j < 6; j++) {
-                               outl(i + j + 4, card->io_port + CSR10);
-                               card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
+                               xw32(CSR10, i + j + 4);
+                               card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
                        }
                        break;
                } else if (link == 0) {
@@ -1010,6 +1045,7 @@ static void read_mac_address(struct xircom_private *card)
  */
 static void transceiver_voodoo(struct xircom_private *card)
 {
+       void __iomem *ioaddr = card->ioaddr;
        unsigned long flags;
 
        /* disable all powermanagement */
@@ -1019,14 +1055,14 @@ static void transceiver_voodoo(struct xircom_private *card)
 
        spin_lock_irqsave(&card->lock, flags);
 
-       outl(0x0008, card->io_port + CSR15);
-        udelay(25);
-        outl(0xa8050000, card->io_port + CSR15);
-        udelay(25);
-        outl(0xa00f0000, card->io_port + CSR15);
-        udelay(25);
+       xw32(CSR15, 0x0008);
+       udelay(25);
+       xw32(CSR15, 0xa8050000);
+       udelay(25);
+       xw32(CSR15, 0xa00f0000);
+       udelay(25);
 
-        spin_unlock_irqrestore(&card->lock, flags);
+       spin_unlock_irqrestore(&card->lock, flags);
 
        netif_start_queue(card->dev);
 }
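
The xircom_cb hunks above replace raw inl()/outl() calls with small xr8/xr32/xw32 helpers that expand against a local ioaddr variable (the helper definitions themselves sit earlier in the patch, outside this excerpt). A minimal sketch of that pattern follows; the 0x30 offset for CSR6 is an assumption (tulip-style register spacing) and the function name is made up for illustration.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: the real macros and CSR offsets live in xircom_cb.c. */
#define CSR6 0x30				/* operation mode; 0x30 is assumed */
#define xr32(reg)      ioread32(ioaddr + (reg))
#define xw32(reg, val) iowrite32(val, ioaddr + (reg))

static void example_enable_tx(void __iomem *ioaddr)
{
	u32 val = xr32(CSR6);		/* helpers expand against the local "ioaddr" */

	xw32(CSR6, val | (1 << 13));	/* bit 13: transmitter enable, as in activate_transmitter() */
}

The same convention reappears below in dl2k.c as the dr*/dw* macros.
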
index b2dc2c81a147f66581d1302f1b8eb1ee92346288..ef4499d2ee4b059e679b6495e36537233705055e 100644 (file)
 #include "dl2k.h"
 #include <linux/dma-mapping.h>
 
+#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
+#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
+#define dw8(reg, val)  iowrite8(val, ioaddr + (reg))
+#define dr32(reg)      ioread32(ioaddr + (reg))
+#define dr16(reg)      ioread16(ioaddr + (reg))
+#define dr8(reg)       ioread8(ioaddr + (reg))
+
 static char version[] __devinitdata =
       KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
 #define MAX_UNITS 8
@@ -49,8 +56,13 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
 /* Enable the default interrupts */
 #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
        UpdateStats | LinkEvent)
-#define EnableInt() \
-writew(DEFAULT_INTR, ioaddr + IntEnable)
+
+static void dl2k_enable_int(struct netdev_private *np)
+{
+       void __iomem *ioaddr = np->ioaddr;
+
+       dw16(IntEnable, DEFAULT_INTR);
+}
 
 static const int max_intrloop = 50;
 static const int multicast_filter_limit = 0x40;
@@ -73,7 +85,7 @@ static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
 static int rio_close (struct net_device *dev);
 static int find_miiphy (struct net_device *dev);
 static int parse_eeprom (struct net_device *dev);
-static int read_eeprom (long ioaddr, int eep_addr);
+static int read_eeprom (struct netdev_private *, int eep_addr);
 static int mii_wait_link (struct net_device *dev, int wait);
 static int mii_set_media (struct net_device *dev);
 static int mii_get_media (struct net_device *dev);
@@ -106,7 +118,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
        static int card_idx;
        int chip_idx = ent->driver_data;
        int err, irq;
-       long ioaddr;
+       void __iomem *ioaddr;
        static int version_printed;
        void *ring_space;
        dma_addr_t ring_dma;
@@ -124,26 +136,29 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_disable;
 
        pci_set_master (pdev);
+
+       err = -ENOMEM;
+
        dev = alloc_etherdev (sizeof (*np));
-       if (!dev) {
-               err = -ENOMEM;
+       if (!dev)
                goto err_out_res;
-       }
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-#ifdef MEM_MAPPING
-       ioaddr = pci_resource_start (pdev, 1);
-       ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
-       if (!ioaddr) {
-               err = -ENOMEM;
+       np = netdev_priv(dev);
+
+       /* IO registers range. */
+       ioaddr = pci_iomap(pdev, 0, 0);
+       if (!ioaddr)
                goto err_out_dev;
-       }
-#else
-       ioaddr = pci_resource_start (pdev, 0);
+       np->eeprom_addr = ioaddr;
+
+#ifdef MEM_MAPPING
+       /* MM registers range. */
+       ioaddr = pci_iomap(pdev, 1, 0);
+       if (!ioaddr)
+               goto err_out_iounmap;
 #endif
-       dev->base_addr = ioaddr;
-       dev->irq = irq;
-       np = netdev_priv(dev);
+       np->ioaddr = ioaddr;
        np->chip_id = chip_idx;
        np->pdev = pdev;
        spin_lock_init (&np->tx_lock);
@@ -239,7 +254,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_unmap_rx;
 
        /* Fiber device? */
-       np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
+       np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
        np->link_status = 0;
        /* Set media and reset PHY */
        if (np->phy_media) {
@@ -276,22 +291,20 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
                printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
        return 0;
 
-      err_out_unmap_rx:
+err_out_unmap_rx:
        pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-      err_out_unmap_tx:
+err_out_unmap_tx:
        pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
-      err_out_iounmap:
+err_out_iounmap:
 #ifdef MEM_MAPPING
-       iounmap ((void *) ioaddr);
-
-      err_out_dev:
+       pci_iounmap(pdev, np->ioaddr);
 #endif
+       pci_iounmap(pdev, np->eeprom_addr);
+err_out_dev:
        free_netdev (dev);
-
-      err_out_res:
+err_out_res:
        pci_release_regions (pdev);
-
-      err_out_disable:
+err_out_disable:
        pci_disable_device (pdev);
        return err;
 }
@@ -299,11 +312,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
 static int
 find_miiphy (struct net_device *dev)
 {
+       struct netdev_private *np = netdev_priv(dev);
        int i, phy_found = 0;
-       struct netdev_private *np;
-       long ioaddr;
        np = netdev_priv(dev);
-       ioaddr = dev->base_addr;
        np->phy_addr = 1;
 
        for (i = 31; i >= 0; i--) {
@@ -323,26 +334,19 @@ find_miiphy (struct net_device *dev)
 static int
 parse_eeprom (struct net_device *dev)
 {
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        int i, j;
-       long ioaddr = dev->base_addr;
        u8 sromdata[256];
        u8 *psib;
        u32 crc;
        PSROM_t psrom = (PSROM_t) sromdata;
-       struct netdev_private *np = netdev_priv(dev);
 
        int cid, next;
 
-#ifdef MEM_MAPPING
-       ioaddr = pci_resource_start (np->pdev, 0);
-#endif
-       /* Read eeprom */
-       for (i = 0; i < 128; i++) {
-               ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
-       }
-#ifdef MEM_MAPPING
-       ioaddr = dev->base_addr;
-#endif
+       for (i = 0; i < 128; i++)
+               ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
+
        if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {  /* D-Link Only */
                /* Check CRC */
                crc = ~ether_crc_le (256 - 4, sromdata);
@@ -378,8 +382,7 @@ parse_eeprom (struct net_device *dev)
                        return 0;
                case 2: /* Duplex Polarity */
                        np->duplex_polarity = psib[i];
-                       writeb (readb (ioaddr + PhyCtrl) | psib[i],
-                               ioaddr + PhyCtrl);
+                       dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
                        break;
                case 3: /* Wake Polarity */
                        np->wake_polarity = psib[i];
@@ -407,59 +410,57 @@ static int
 rio_open (struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = np->ioaddr;
+       const int irq = np->pdev->irq;
        int i;
        u16 macctrl;
 
-       i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+       i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;
 
        /* Reset all logic functions */
-       writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
-               ioaddr + ASICCtrl + 2);
+       dw16(ASICCtrl + 2,
+            GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
        mdelay(10);
 
        /* DebugCtrl bit 4, 5, 9 must set */
-       writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
+       dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
 
        /* Jumbo frame */
        if (np->jumbo != 0)
-               writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
+               dw16(MaxFrameSize, MAX_JUMBO+14);
 
        alloc_list (dev);
 
        /* Get station address */
        for (i = 0; i < 6; i++)
-               writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
+               dw8(StationAddr0 + i, dev->dev_addr[i]);
 
        set_multicast (dev);
        if (np->coalesce) {
-               writel (np->rx_coalesce | np->rx_timeout << 16,
-                       ioaddr + RxDMAIntCtrl);
+               dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
        }
        /* Set RIO to poll every N*320nsec. */
-       writeb (0x20, ioaddr + RxDMAPollPeriod);
-       writeb (0xff, ioaddr + TxDMAPollPeriod);
-       writeb (0x30, ioaddr + RxDMABurstThresh);
-       writeb (0x30, ioaddr + RxDMAUrgentThresh);
-       writel (0x0007ffff, ioaddr + RmonStatMask);
+       dw8(RxDMAPollPeriod, 0x20);
+       dw8(TxDMAPollPeriod, 0xff);
+       dw8(RxDMABurstThresh, 0x30);
+       dw8(RxDMAUrgentThresh, 0x30);
+       dw32(RmonStatMask, 0x0007ffff);
        /* clear statistics */
        clear_stats (dev);
 
        /* VLAN supported */
        if (np->vlan) {
                /* priority field in RxDMAIntCtrl  */
-               writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
-                       ioaddr + RxDMAIntCtrl);
+               dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
                /* VLANId */
-               writew (np->vlan, ioaddr + VLANId);
+               dw16(VLANId, np->vlan);
                /* Length/Type should be 0x8100 */
-               writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
+               dw32(VLANTag, 0x8100 << 16 | np->vlan);
                /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
                   VLAN information tagged by TFC' VID, CFI fields. */
-               writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
-                       ioaddr + MACCtrl);
+               dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
        }
 
        init_timer (&np->timer);
@@ -469,20 +470,18 @@ rio_open (struct net_device *dev)
        add_timer (&np->timer);
 
        /* Start Tx/Rx */
-       writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
-                       ioaddr + MACCtrl);
+       dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
 
        macctrl = 0;
        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
        macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
        macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
-       writew(macctrl, ioaddr + MACCtrl);
+       dw16(MACCtrl, macctrl);
 
        netif_start_queue (dev);
 
-       /* Enable default interrupts */
-       EnableInt ();
+       dl2k_enable_int(np);
        return 0;
 }
 
@@ -533,10 +532,11 @@ rio_timer (unsigned long data)
 static void
 rio_tx_timeout (struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
 
        printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
-               dev->name, readl (ioaddr + TxStatus));
+               dev->name, dr32(TxStatus));
        rio_free_tx(dev, 0);
        dev->if_port = 0;
        dev->trans_start = jiffies; /* prevent tx timeout */
@@ -547,6 +547,7 @@ static void
 alloc_list (struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        int i;
 
        np->cur_rx = np->cur_tx = 0;
@@ -594,24 +595,23 @@ alloc_list (struct net_device *dev)
        }
 
        /* Set RFDListPtr */
-       writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
-       writel (0, dev->base_addr + RFDListPtr1);
+       dw32(RFDListPtr0, np->rx_ring_dma);
+       dw32(RFDListPtr1, 0);
 }
 
 static netdev_tx_t
 start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        struct netdev_desc *txdesc;
        unsigned entry;
-       u32 ioaddr;
        u64 tfc_vlan_tag = 0;
 
        if (np->link_status == 0) {     /* Link Down */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
-       ioaddr = dev->base_addr;
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];
@@ -646,9 +646,9 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
                                              (1 << FragCountShift));
 
        /* TxDMAPollNow */
-       writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
+       dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
        /* Schedule ISR */
-       writel(10000, ioaddr + CountDown);
+       dw32(CountDown, 10000);
        np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
        if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
                        < TX_QUEUE_LEN - 1 && np->speed != 10) {
@@ -658,10 +658,10 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
        }
 
        /* The first TFDListPtr */
-       if (readl (dev->base_addr + TFDListPtr0) == 0) {
-               writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
-                       dev->base_addr + TFDListPtr0);
-               writel (0, dev->base_addr + TFDListPtr1);
+       if (!dr32(TFDListPtr0)) {
+               dw32(TFDListPtr0, np->tx_ring_dma +
+                    entry * sizeof (struct netdev_desc));
+               dw32(TFDListPtr1, 0);
        }
 
        return NETDEV_TX_OK;
@@ -671,17 +671,15 @@ static irqreturn_t
 rio_interrupt (int irq, void *dev_instance)
 {
        struct net_device *dev = dev_instance;
-       struct netdev_private *np;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        unsigned int_status;
-       long ioaddr;
        int cnt = max_intrloop;
        int handled = 0;
 
-       ioaddr = dev->base_addr;
-       np = netdev_priv(dev);
        while (1) {
-               int_status = readw (ioaddr + IntStatus);
-               writew (int_status, ioaddr + IntStatus);
+               int_status = dr16(IntStatus);
+               dw16(IntStatus, int_status);
                int_status &= DEFAULT_INTR;
                if (int_status == 0 || --cnt < 0)
                        break;
@@ -692,7 +690,7 @@ rio_interrupt (int irq, void *dev_instance)
                /* TxDMAComplete interrupt */
                if ((int_status & (TxDMAComplete|IntRequested))) {
                        int tx_status;
-                       tx_status = readl (ioaddr + TxStatus);
+                       tx_status = dr32(TxStatus);
                        if (tx_status & 0x01)
                                tx_error (dev, tx_status);
                        /* Free used tx skbuffs */
@@ -705,7 +703,7 @@ rio_interrupt (int irq, void *dev_instance)
                        rio_error (dev, int_status);
        }
        if (np->cur_tx != np->old_tx)
-               writel (100, ioaddr + CountDown);
+               dw32(CountDown, 100);
        return IRQ_RETVAL(handled);
 }
 
@@ -765,13 +763,11 @@ rio_free_tx (struct net_device *dev, int irq)
 static void
 tx_error (struct net_device *dev, int tx_status)
 {
-       struct netdev_private *np;
-       long ioaddr = dev->base_addr;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        int frame_id;
        int i;
 
-       np = netdev_priv(dev);
-
        frame_id = (tx_status & 0xffff0000);
        printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
                dev->name, tx_status, frame_id);
@@ -779,23 +775,21 @@ tx_error (struct net_device *dev, int tx_status)
        /* Transmit Underrun */
        if (tx_status & 0x10) {
                np->stats.tx_fifo_errors++;
-               writew (readw (ioaddr + TxStartThresh) + 0x10,
-                       ioaddr + TxStartThresh);
+               dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
                /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
-               writew (TxReset | DMAReset | FIFOReset | NetworkReset,
-                       ioaddr + ASICCtrl + 2);
+               dw16(ASICCtrl + 2,
+                    TxReset | DMAReset | FIFOReset | NetworkReset);
                /* Wait for ResetBusy bit clear */
                for (i = 50; i > 0; i--) {
-                       if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+                       if (!(dr16(ASICCtrl + 2) & ResetBusy))
                                break;
                        mdelay (1);
                }
                rio_free_tx (dev, 1);
                /* Reset TFDListPtr */
-               writel (np->tx_ring_dma +
-                       np->old_tx * sizeof (struct netdev_desc),
-                       dev->base_addr + TFDListPtr0);
-               writel (0, dev->base_addr + TFDListPtr1);
+               dw32(TFDListPtr0, np->tx_ring_dma +
+                    np->old_tx * sizeof (struct netdev_desc));
+               dw32(TFDListPtr1, 0);
 
                /* Let TxStartThresh stay default value */
        }
@@ -803,10 +797,10 @@ tx_error (struct net_device *dev, int tx_status)
        if (tx_status & 0x04) {
                np->stats.tx_fifo_errors++;
                /* TxReset and clear FIFO */
-               writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
+               dw16(ASICCtrl + 2, TxReset | FIFOReset);
                /* Wait reset done */
                for (i = 50; i > 0; i--) {
-                       if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+                       if (!(dr16(ASICCtrl + 2) & ResetBusy))
                                break;
                        mdelay (1);
                }
@@ -821,7 +815,7 @@ tx_error (struct net_device *dev, int tx_status)
                np->stats.collisions++;
 #endif
        /* Restart the Tx */
-       writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
+       dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
 }
 
 static int
@@ -931,8 +925,8 @@ receive_packet (struct net_device *dev)
 static void
 rio_error (struct net_device *dev, int int_status)
 {
-       long ioaddr = dev->base_addr;
        struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        u16 macctrl;
 
        /* Link change event */
@@ -954,7 +948,7 @@ rio_error (struct net_device *dev, int int_status)
                                TxFlowControlEnable : 0;
                        macctrl |= (np->rx_flow) ?
                                RxFlowControlEnable : 0;
-                       writew(macctrl, ioaddr + MACCtrl);
+                       dw16(MACCtrl, macctrl);
                        np->link_status = 1;
                        netif_carrier_on(dev);
                } else {
@@ -974,7 +968,7 @@ rio_error (struct net_device *dev, int int_status)
        if (int_status & HostError) {
                printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
                        dev->name, int_status);
-               writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
+               dw16(ASICCtrl + 2, GlobalReset | HostReset);
                mdelay (500);
        }
 }
@@ -982,8 +976,8 @@ rio_error (struct net_device *dev, int int_status)
 static struct net_device_stats *
 get_stats (struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
        struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
 #ifdef MEM_MAPPING
        int i;
 #endif
@@ -992,106 +986,107 @@ get_stats (struct net_device *dev)
        /* All statistics registers need to be acknowledged,
           else statistic overflow could cause problems */
 
-       np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
-       np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
-       np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
-       np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
+       np->stats.rx_packets += dr32(FramesRcvOk);
+       np->stats.tx_packets += dr32(FramesXmtOk);
+       np->stats.rx_bytes += dr32(OctetRcvOk);
+       np->stats.tx_bytes += dr32(OctetXmtOk);
 
-       np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
-       np->stats.collisions += readl (ioaddr + SingleColFrames)
-                            +  readl (ioaddr + MultiColFrames);
+       np->stats.multicast = dr32(McstFramesRcvdOk);
+       np->stats.collisions += dr32(SingleColFrames)
+                            +  dr32(MultiColFrames);
 
        /* detailed tx errors */
-       stat_reg = readw (ioaddr + FramesAbortXSColls);
+       stat_reg = dr16(FramesAbortXSColls);
        np->stats.tx_aborted_errors += stat_reg;
        np->stats.tx_errors += stat_reg;
 
-       stat_reg = readw (ioaddr + CarrierSenseErrors);
+       stat_reg = dr16(CarrierSenseErrors);
        np->stats.tx_carrier_errors += stat_reg;
        np->stats.tx_errors += stat_reg;
 
        /* Clear all other statistic register. */
-       readl (ioaddr + McstOctetXmtOk);
-       readw (ioaddr + BcstFramesXmtdOk);
-       readl (ioaddr + McstFramesXmtdOk);
-       readw (ioaddr + BcstFramesRcvdOk);
-       readw (ioaddr + MacControlFramesRcvd);
-       readw (ioaddr + FrameTooLongErrors);
-       readw (ioaddr + InRangeLengthErrors);
-       readw (ioaddr + FramesCheckSeqErrors);
-       readw (ioaddr + FramesLostRxErrors);
-       readl (ioaddr + McstOctetXmtOk);
-       readl (ioaddr + BcstOctetXmtOk);
-       readl (ioaddr + McstFramesXmtdOk);
-       readl (ioaddr + FramesWDeferredXmt);
-       readl (ioaddr + LateCollisions);
-       readw (ioaddr + BcstFramesXmtdOk);
-       readw (ioaddr + MacControlFramesXmtd);
-       readw (ioaddr + FramesWEXDeferal);
+       dr32(McstOctetXmtOk);
+       dr16(BcstFramesXmtdOk);
+       dr32(McstFramesXmtdOk);
+       dr16(BcstFramesRcvdOk);
+       dr16(MacControlFramesRcvd);
+       dr16(FrameTooLongErrors);
+       dr16(InRangeLengthErrors);
+       dr16(FramesCheckSeqErrors);
+       dr16(FramesLostRxErrors);
+       dr32(McstOctetXmtOk);
+       dr32(BcstOctetXmtOk);
+       dr32(McstFramesXmtdOk);
+       dr32(FramesWDeferredXmt);
+       dr32(LateCollisions);
+       dr16(BcstFramesXmtdOk);
+       dr16(MacControlFramesXmtd);
+       dr16(FramesWEXDeferal);
 
 #ifdef MEM_MAPPING
        for (i = 0x100; i <= 0x150; i += 4)
-               readl (ioaddr + i);
+               dr32(i);
 #endif
-       readw (ioaddr + TxJumboFrames);
-       readw (ioaddr + RxJumboFrames);
-       readw (ioaddr + TCPCheckSumErrors);
-       readw (ioaddr + UDPCheckSumErrors);
-       readw (ioaddr + IPCheckSumErrors);
+       dr16(TxJumboFrames);
+       dr16(RxJumboFrames);
+       dr16(TCPCheckSumErrors);
+       dr16(UDPCheckSumErrors);
+       dr16(IPCheckSumErrors);
        return &np->stats;
 }
 
 static int
 clear_stats (struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
 #ifdef MEM_MAPPING
        int i;
 #endif
 
        /* All statistics registers need to be acknowledged,
           else statistic overflow could cause problems */
-       readl (ioaddr + FramesRcvOk);
-       readl (ioaddr + FramesXmtOk);
-       readl (ioaddr + OctetRcvOk);
-       readl (ioaddr + OctetXmtOk);
-
-       readl (ioaddr + McstFramesRcvdOk);
-       readl (ioaddr + SingleColFrames);
-       readl (ioaddr + MultiColFrames);
-       readl (ioaddr + LateCollisions);
+       dr32(FramesRcvOk);
+       dr32(FramesXmtOk);
+       dr32(OctetRcvOk);
+       dr32(OctetXmtOk);
+
+       dr32(McstFramesRcvdOk);
+       dr32(SingleColFrames);
+       dr32(MultiColFrames);
+       dr32(LateCollisions);
        /* detailed rx errors */
-       readw (ioaddr + FrameTooLongErrors);
-       readw (ioaddr + InRangeLengthErrors);
-       readw (ioaddr + FramesCheckSeqErrors);
-       readw (ioaddr + FramesLostRxErrors);
+       dr16(FrameTooLongErrors);
+       dr16(InRangeLengthErrors);
+       dr16(FramesCheckSeqErrors);
+       dr16(FramesLostRxErrors);
 
        /* detailed tx errors */
-       readw (ioaddr + FramesAbortXSColls);
-       readw (ioaddr + CarrierSenseErrors);
+       dr16(FramesAbortXSColls);
+       dr16(CarrierSenseErrors);
 
        /* Clear all other statistic register. */
-       readl (ioaddr + McstOctetXmtOk);
-       readw (ioaddr + BcstFramesXmtdOk);
-       readl (ioaddr + McstFramesXmtdOk);
-       readw (ioaddr + BcstFramesRcvdOk);
-       readw (ioaddr + MacControlFramesRcvd);
-       readl (ioaddr + McstOctetXmtOk);
-       readl (ioaddr + BcstOctetXmtOk);
-       readl (ioaddr + McstFramesXmtdOk);
-       readl (ioaddr + FramesWDeferredXmt);
-       readw (ioaddr + BcstFramesXmtdOk);
-       readw (ioaddr + MacControlFramesXmtd);
-       readw (ioaddr + FramesWEXDeferal);
+       dr32(McstOctetXmtOk);
+       dr16(BcstFramesXmtdOk);
+       dr32(McstFramesXmtdOk);
+       dr16(BcstFramesRcvdOk);
+       dr16(MacControlFramesRcvd);
+       dr32(McstOctetXmtOk);
+       dr32(BcstOctetXmtOk);
+       dr32(McstFramesXmtdOk);
+       dr32(FramesWDeferredXmt);
+       dr16(BcstFramesXmtdOk);
+       dr16(MacControlFramesXmtd);
+       dr16(FramesWEXDeferal);
 #ifdef MEM_MAPPING
        for (i = 0x100; i <= 0x150; i += 4)
-               readl (ioaddr + i);
+               dr32(i);
 #endif
-       readw (ioaddr + TxJumboFrames);
-       readw (ioaddr + RxJumboFrames);
-       readw (ioaddr + TCPCheckSumErrors);
-       readw (ioaddr + UDPCheckSumErrors);
-       readw (ioaddr + IPCheckSumErrors);
+       dr16(TxJumboFrames);
+       dr16(RxJumboFrames);
+       dr16(TCPCheckSumErrors);
+       dr16(UDPCheckSumErrors);
+       dr16(IPCheckSumErrors);
        return 0;
 }
 
@@ -1114,10 +1109,10 @@ change_mtu (struct net_device *dev, int new_mtu)
 static void
 set_multicast (struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        u32 hash_table[2];
        u16 rx_mode = 0;
-       struct netdev_private *np = netdev_priv(dev);
 
        hash_table[0] = hash_table[1] = 0;
        /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
@@ -1153,9 +1148,9 @@ set_multicast (struct net_device *dev)
                rx_mode |= ReceiveVLANMatch;
        }
 
-       writel (hash_table[0], ioaddr + HashTable0);
-       writel (hash_table[1], ioaddr + HashTable1);
-       writew (rx_mode, ioaddr + ReceiveMode);
+       dw32(HashTable0, hash_table[0]);
+       dw32(HashTable1, hash_table[1]);
+       dw16(ReceiveMode, rx_mode);
 }
 
 static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1318,15 +1313,15 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
 #define EEP_BUSY 0x8000
 /* Read the EEPROM word */
 /* We use I/O instruction to read/write eeprom to avoid fail on some machines */
-static int
-read_eeprom (long ioaddr, int eep_addr)
+static int read_eeprom(struct netdev_private *np, int eep_addr)
 {
+       void __iomem *ioaddr = np->eeprom_addr;
        int i = 1000;
-       outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
+
+       dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
        while (i-- > 0) {
-               if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
-                       return inw (ioaddr + EepromData);
-               }
+               if (!(dr16(EepromCtrl) & EEP_BUSY))
+                       return dr16(EepromData);
        }
        return 0;
 }
@@ -1336,38 +1331,40 @@ enum phy_ctrl_bits {
        MII_DUPLEX = 0x08,
 };
 
-#define mii_delay() readb(ioaddr)
+#define mii_delay() dr8(PhyCtrl)
 static void
 mii_sendbit (struct net_device *dev, u32 data)
 {
-       long ioaddr = dev->base_addr + PhyCtrl;
-       data = (data) ? MII_DATA1 : 0;
-       data |= MII_WRITE;
-       data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
-       writeb (data, ioaddr);
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
+
+       data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
+       dw8(PhyCtrl, data);
        mii_delay ();
-       writeb (data | MII_CLK, ioaddr);
+       dw8(PhyCtrl, data | MII_CLK);
        mii_delay ();
 }
 
 static int
 mii_getbit (struct net_device *dev)
 {
-       long ioaddr = dev->base_addr + PhyCtrl;
+       struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
        u8 data;
 
-       data = (readb (ioaddr) & 0xf8) | MII_READ;
-       writeb (data, ioaddr);
+       data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
+       dw8(PhyCtrl, data);
        mii_delay ();
-       writeb (data | MII_CLK, ioaddr);
+       dw8(PhyCtrl, data | MII_CLK);
        mii_delay ();
-       return ((readb (ioaddr) >> 1) & 1);
+       return (dr8(PhyCtrl) >> 1) & 1;
 }
 
 static void
 mii_send_bits (struct net_device *dev, u32 data, int len)
 {
        int i;
+
        for (i = len - 1; i >= 0; i--) {
                mii_sendbit (dev, data & (1 << i));
        }
@@ -1721,28 +1718,29 @@ mii_set_media_pcs (struct net_device *dev)
 static int
 rio_close (struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
        struct netdev_private *np = netdev_priv(dev);
+       void __iomem *ioaddr = np->ioaddr;
+
+       struct pci_dev *pdev = np->pdev;
        struct sk_buff *skb;
        int i;
 
        netif_stop_queue (dev);
 
        /* Disable interrupts */
-       writew (0, ioaddr + IntEnable);
+       dw16(IntEnable, 0);
 
        /* Stop Tx and Rx logics */
-       writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
+       dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
 
-       free_irq (dev->irq, dev);
+       free_irq(pdev->irq, dev);
        del_timer_sync (&np->timer);
 
        /* Free all the skbuffs in the queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                skb = np->rx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(np->pdev,
-                                        desc_to_dma(&np->rx_ring[i]),
+                       pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
                                         skb->len, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb (skb);
                        np->rx_skbuff[i] = NULL;
@@ -1753,8 +1751,7 @@ rio_close (struct net_device *dev)
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = np->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(np->pdev,
-                                        desc_to_dma(&np->tx_ring[i]),
+                       pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
                                         skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb (skb);
                        np->tx_skbuff[i] = NULL;
@@ -1778,8 +1775,9 @@ rio_remove1 (struct pci_dev *pdev)
                pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
                                     np->tx_ring_dma);
 #ifdef MEM_MAPPING
-               iounmap ((char *) (dev->base_addr));
+               pci_iounmap(pdev, np->ioaddr);
 #endif
+               pci_iounmap(pdev, np->eeprom_addr);
                free_netdev (dev);
                pci_release_regions (pdev);
                pci_disable_device (pdev);
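
In the dl2k.c changes above, rio_probe1() now maps both PCI BARs with pci_iomap(): BAR 0 is always mapped and kept as np->eeprom_addr, since the EEPROM is reached through the I/O register window, while BAR 1 becomes the register base np->ioaddr when MEM_MAPPING is defined (otherwise np->ioaddr simply aliases the BAR 0 mapping). A condensed sketch of just that mapping step is shown below, with the goto-based error unwinding collapsed and a hypothetical helper name; rio_probe1() above remains the authoritative version.

#include <linux/pci.h>
#include "dl2k.h"		/* netdev_private with ioaddr/eeprom_addr, see the dl2k.h hunk below */

static int example_map_bars(struct pci_dev *pdev, struct netdev_private *np)
{
	void __iomem *ioaddr = pci_iomap(pdev, 0, 0);	/* I/O registers range */

	if (!ioaddr)
		return -ENOMEM;
	np->eeprom_addr = ioaddr;			/* EEPROM is reached through BAR 0 */

#ifdef MEM_MAPPING
	ioaddr = pci_iomap(pdev, 1, 0);			/* MM registers range */
	if (!ioaddr) {
		pci_iounmap(pdev, np->eeprom_addr);
		return -ENOMEM;
	}
#endif
	np->ioaddr = ioaddr;				/* dr*/dw* accessors resolve against this */
	return 0;
}
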
index ba0adcafa55ad71c2dd63098796ce003a4cb6701..40ba6e02988c9842f31b816c955049865f654c24 100644 (file)
 #define TX_TOTAL_SIZE  TX_RING_SIZE*sizeof(struct netdev_desc)
 #define RX_TOTAL_SIZE  RX_RING_SIZE*sizeof(struct netdev_desc)
 
-/* This driver was written to use PCI memory space, however x86-oriented
-   hardware often uses I/O space accesses. */
-#ifndef MEM_MAPPING
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb inb
-#define readw inw
-#define readl inl
-#define writeb outb
-#define writew outw
-#define writel outl
-#endif
-
 /* Offsets to the device registers.
    Unlike software-only systems, device drivers interact with complex hardware.
    It's not useful to define symbolic names for every register bit in the
@@ -391,6 +374,8 @@ struct netdev_private {
        dma_addr_t tx_ring_dma;
        dma_addr_t rx_ring_dma;
        struct pci_dev *pdev;
+       void __iomem *ioaddr;
+       void __iomem *eeprom_addr;
        spinlock_t tx_lock;
        spinlock_t rx_lock;
        struct net_device_stats stats;
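
The dl2k.h hunk above can drop the old readb/writeb-to-inb/outb redefinitions because the cookie returned by pci_iomap() is accepted by ioread*/iowrite* whether the BAR behind it is port I/O or memory, so a single accessor path serves both the MEM_MAPPING and I/O-port builds. A small sketch of that property; the 0x10 offset and the function are illustrative only, not driver code.

#include <linux/pci.h>
#include <linux/io.h>

static u32 example_read_modify(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 0, 0);	/* port or memory BAR, same call */
	u32 v;

	if (!ioaddr)
		return 0;
	v = ioread32(ioaddr + 0x10);			/* 0x10 is a made-up offset */
	iowrite32(v | 0x1, ioaddr + 0x10);
	pci_iounmap(pdev, ioaddr);
	return v;
}
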
index d783f4f96ec0963b36f235105294c648893c777b..d7bb52a7bda1092c9b828f2b1dacc6d2a029f0cd 100644 (file)
@@ -522,9 +522,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
                        cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       dev->base_addr = (unsigned long)ioaddr;
-       dev->irq = irq;
-
        np = netdev_priv(dev);
        np->base = ioaddr;
        np->pci_dev = pdev;
@@ -828,18 +825,19 @@ static int netdev_open(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
+       const int irq = np->pci_dev->irq;
        unsigned long flags;
        int i;
 
        /* Do we need to reset the chip??? */
 
-       i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+       i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;
 
        if (netif_msg_ifup(np))
-               printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-                          dev->name, dev->irq);
+               printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
+
        init_ring(dev);
 
        iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
@@ -1814,7 +1812,7 @@ static int netdev_close(struct net_device *dev)
        }
 #endif /* __i386__ debugging only */
 
-       free_irq(dev->irq, dev);
+       free_irq(np->pci_dev->irq, dev);
 
        del_timer_sync(&np->timer);
 
index b276469f74e9908487f6a9555fddcdbdd8a5b3f1..290b26f868c9f7d358c125822dd67bc412d85b16 100644 (file)
@@ -815,6 +815,7 @@ static const struct ethtool_ops dnet_ethtool_ops = {
        .set_settings           = dnet_set_settings,
        .get_drvinfo            = dnet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops dnet_netdev_ops = {
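
dnet above, along with the fec, mpc52xx_fec, fs_enet and ucc_geth hunks that follow, wires .get_ts_info to the generic ethtool_op_get_ts_info helper, i.e. these drivers only advertise software timestamping and no PTP hardware clock. A hedged sketch of what such a software-only report looks like is shown below, modeled on the fallback branch of gfar_get_ts_info() further down; the generic helper's exact flag set may differ.

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

/* Sketch of a software-only .get_ts_info, patterned after the
 * !FSL_GIANFAR_DEV_HAS_TIMER branch of gfar_get_ts_info() below.
 */
static int example_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock available */
	return 0;
}
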
index 1637b986229269b4c3f7b7dcc8f1eeac62b4a915..9d71c9cc300bbfb90ce362e89f428c1c709ac8b8 100644 (file)
@@ -545,9 +545,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
        /* Reset the chip to erase previous misconfiguration. */
        iowrite32(0x00000001, ioaddr + BCR);
 
-       dev->base_addr = (unsigned long)ioaddr;
-       dev->irq = irq;
-
        /* Make certain the descriptor lists are aligned. */
        np = netdev_priv(dev);
        np->mem = ioaddr;
@@ -832,11 +829,13 @@ static int netdev_open(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
-       int i;
+       const int irq = np->pci_dev->irq;
+       int rc, i;
 
        iowrite32(0x00000001, ioaddr + BCR);    /* Reset */
 
-       if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
+       rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+       if (rc)
                return -EAGAIN;
 
        for (i = 0; i < 3; i++)
@@ -924,8 +923,7 @@ static int netdev_open(struct net_device *dev)
        np->reset_timer.data = (unsigned long) dev;
        np->reset_timer.function = reset_timer;
        np->reset_timer_armed = 0;
-
-       return 0;
+       return rc;
 }
 
 
@@ -1910,7 +1908,7 @@ static int netdev_close(struct net_device *dev)
        del_timer_sync(&np->timer);
        del_timer_sync(&np->reset_timer);
 
-       free_irq(dev->irq, dev);
+       free_irq(np->pci_dev->irq, dev);
 
        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
index a12b3f5bc025a1dd84d80d51ab7d6ecf29f7f6c9..7fa0227c9c02b7aac3339987e64285b9ea8de0d2 100644 (file)
@@ -1161,6 +1161,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .set_settings           = fec_enet_set_settings,
        .get_drvinfo            = fec_enet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
index 7b34d8c698dae663cd3ebf259e4918696b7c9585..97f947b3d94af9c6b50811ca1b527b34307735c5 100644 (file)
@@ -811,6 +811,7 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
        .get_link = ethtool_op_get_link,
        .get_msglevel = mpc52xx_fec_get_msglevel,
        .set_msglevel = mpc52xx_fec_set_msglevel,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 
index e4e6cd2c5f829f7fd1a1bde5366a7bacab8b3f54..2b7633f766d98a0d79138fc317303579c71ddae6 100644 (file)
@@ -963,6 +963,7 @@ static const struct ethtool_ops fs_ethtool_ops = {
        .get_msglevel = fs_get_msglevel,
        .set_msglevel = fs_set_msglevel,
        .get_regs = fs_get_regs,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index 4c9f8d487dbbf4825e10fc2e3ee25199662e7517..2136c7ff5e6d2331bf22195194b7f8b028f98587 100644 (file)
@@ -1210,4 +1210,7 @@ struct filer_table {
        struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
 };
 
+/* The gianfar_ptp module will set this variable */
+extern int gfar_phc_index;
+
 #endif /* __GIANFAR_H */
index 8d74efd04bb9bc478a0a576d27740d5f22dcb385..8a025570d97e70201dadeb4efd739d35947a3d5e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
@@ -1739,6 +1740,34 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
        return ret;
 }
 
+int gfar_phc_index = -1;
+
+static int gfar_get_ts_info(struct net_device *dev,
+                           struct ethtool_ts_info *info)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+
+       if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
+               info->so_timestamping =
+                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+               return 0;
+       }
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_HARDWARE |
+               SOF_TIMESTAMPING_RX_HARDWARE |
+               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->phc_index = gfar_phc_index;
+       info->tx_types =
+               (1 << HWTSTAMP_TX_OFF) |
+               (1 << HWTSTAMP_TX_ON);
+       info->rx_filters =
+               (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_ALL);
+       return 0;
+}
+
 const struct ethtool_ops gfar_ethtool_ops = {
        .get_settings = gfar_gsettings,
        .set_settings = gfar_ssettings,
@@ -1761,4 +1790,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
 #endif
        .set_rxnfc = gfar_set_nfc,
        .get_rxnfc = gfar_get_nfc,
+       .get_ts_info = gfar_get_ts_info,
 };
index 5fd620bec15cfeb993313b9052fc64894967cd74..c08e5d40fecb425c7f6c26b8de269d270bac727f 100644 (file)
@@ -515,6 +515,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
                err = PTR_ERR(etsects->clock);
                goto no_clock;
        }
+       gfar_phc_index = ptp_clock_index(etsects->clock);
 
        dev_set_drvdata(&dev->dev, etsects);
 
@@ -538,6 +539,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
        gfar_write(&etsects->regs->tmr_temask, 0);
        gfar_write(&etsects->regs->tmr_ctrl,   0);
 
+       gfar_phc_index = -1;
        ptp_clock_unregister(etsects->clock);
        iounmap(etsects->regs);
        release_resource(etsects->rsrc);
index a97257f91a3d39776c378abf5b90aee12d79dacc..37b035306013a0ac443040594177424e44a8422b 100644 (file)
@@ -415,6 +415,7 @@ static const struct ethtool_ops uec_ethtool_ops = {
        .get_ethtool_stats      = uec_get_ethtool_stats,
        .get_wol                = uec_get_wol,
        .set_wol                = uec_set_wol,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 void uec_set_ethtool_ops(struct net_device *netdev)
index 76213162fbe32499869470472243b96a003afbb8..74215c05d7990fefdb6f9db4ff58b7258b623861 100644 (file)
@@ -7,7 +7,7 @@ config NET_VENDOR_INTEL
        default y
        depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
                   ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
-                  GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
+                  GSC || BVME6000 || MVME16x || \
                   (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
                   EXPERIMENTAL
        ---help---
@@ -120,6 +120,17 @@ config IGB_DCA
          driver.  DCA is a method for warming the CPU cache before data
          is used, with the intent of lessening the impact of cache misses.
 
+config IGB_PTP
+       bool "PTP Hardware Clock (PHC)"
+       default y
+       depends on IGB && PTP_1588_CLOCK
+       ---help---
+         Say Y here if you want to use PTP Hardware Clock (PHC) in the
+         driver.  Only the basic clock operations have been implemented.
+
+         Every timestamp and clock read operation must consult the
+         overflow counter to form a correct time value.
+
 config IGBVF
        tristate "Intel(R) 82576 Virtual Function Ethernet support"
        depends on PCI
index 4348b6fd44fac08e06666af2f581889d418ada7a..3d712f262e830ef193091e8fceed17244171ed69 100644 (file)
@@ -827,9 +827,10 @@ static int e1000_set_features(struct net_device *netdev,
        if (changed & NETIF_F_HW_VLAN_RX)
                e1000_vlan_mode(netdev, features);
 
-       if (!(changed & NETIF_F_RXCSUM))
+       if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
                return 0;
 
+       netdev->features = features;
        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 
        if (netif_running(netdev))
@@ -1074,6 +1075,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        netdev->features |= netdev->hw_features;
        netdev->hw_features |= NETIF_F_RXCSUM;
+       netdev->hw_features |= NETIF_F_RXALL;
        netdev->hw_features |= NETIF_F_RXFCS;
 
        if (pci_using_dac) {
@@ -1841,6 +1843,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                        break;
        }
 
+       /* This is useful for sniffing bad packets. */
+       if (adapter->netdev->features & NETIF_F_RXALL) {
+               /* UPE and MPE will be handled by normal PROMISC logic
+                * in e1000e_set_rx_mode */
+               rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+                        E1000_RCTL_BAM | /* RX All Bcast Pkts */
+                        E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+               rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+                         E1000_RCTL_DPF | /* Allow filtered pause */
+                         E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+               /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+                * and that breaks VLANs.
+                */
+       }
+
        ew32(RCTL, rctl);
 }
 
@@ -4057,6 +4075,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                                       irq_flags);
                                length--;
                        } else {
+                               if (netdev->features & NETIF_F_RXALL)
+                                       goto process_skb;
                                /* recycle both page and skb */
                                buffer_info->skb = skb;
                                /* an error means any chain goes out the window
@@ -4069,6 +4089,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                }
 
 #define rxtop rx_ring->rx_skb_top
+process_skb:
                if (!(status & E1000_RXD_STAT_EOP)) {
                        /* this descriptor is only the beginning (or middle) */
                        if (!rxtop) {
@@ -4276,12 +4297,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                                       flags);
                                length--;
                        } else {
+                               if (netdev->features & NETIF_F_RXALL)
+                                       goto process_skb;
                                /* recycle */
                                buffer_info->skb = skb;
                                goto next_desc;
                        }
                }
 
+process_skb:
                total_rx_bytes += (length - 4); /* don't count FCS */
                total_rx_packets++;
 
index db35dd5d96de570445b3cd4e820bda2edea09a5c..6302b10cb3a64d666ccb5342f5924a2729899d48 100644 (file)
@@ -403,15 +403,15 @@ static void e1000_get_regs(struct net_device *netdev,
        regs_buff[1]  = er32(STATUS);
 
        regs_buff[2]  = er32(RCTL);
-       regs_buff[3]  = er32(RDLEN);
-       regs_buff[4]  = er32(RDH);
-       regs_buff[5]  = er32(RDT);
+       regs_buff[3]  = er32(RDLEN(0));
+       regs_buff[4]  = er32(RDH(0));
+       regs_buff[5]  = er32(RDT(0));
        regs_buff[6]  = er32(RDTR);
 
        regs_buff[7]  = er32(TCTL);
-       regs_buff[8]  = er32(TDLEN);
-       regs_buff[9]  = er32(TDH);
-       regs_buff[10] = er32(TDT);
+       regs_buff[8]  = er32(TDLEN(0));
+       regs_buff[9]  = er32(TDH(0));
+       regs_buff[10] = er32(TDT(0));
        regs_buff[11] = er32(TIDV);
 
        regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
@@ -813,15 +813,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        }
 
        REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
-       REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
-       REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
-       REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
-       REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
+       REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+       REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
+       REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
+       REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
        REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
        REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
        REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
-       REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
-       REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);
+       REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+       REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
 
        REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
 
@@ -830,10 +830,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
 
        REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
-       REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+       REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
        if (!(adapter->flags & FLAG_IS_ICH))
                REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
-       REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+       REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
        REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
        mask = 0x8003FFFF;
        switch (mac->type) {
@@ -1104,11 +1104,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 
-       ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-       ew32(TDBAH, ((u64) tx_ring->dma >> 32));
-       ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
-       ew32(TDH, 0);
-       ew32(TDT, 0);
+       ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+       ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
+       ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
+       ew32(TDH(0), 0);
+       ew32(TDT(0), 0);
        ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
             E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
             E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1168,11 +1168,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rctl = er32(RCTL);
        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
                ew32(RCTL, rctl & ~E1000_RCTL_EN);
-       ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
-       ew32(RDBAH, ((u64) rx_ring->dma >> 32));
-       ew32(RDLEN, rx_ring->size);
-       ew32(RDH, 0);
-       ew32(RDT, 0);
+       ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
+       ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+       ew32(RDLEN(0), rx_ring->size);
+       ew32(RDH(0), 0);
+       ew32(RDT(0), 0);
        rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
                E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
                E1000_RCTL_SBP | E1000_RCTL_SECRC |
@@ -1534,7 +1534,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
        int ret_val = 0;
        unsigned long time;
 
-       ew32(RDT, rx_ring->count - 1);
+       ew32(RDT(0), rx_ring->count - 1);
 
        /*
         * Calculate the loop count based on the largest descriptor ring
@@ -1561,7 +1561,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
                        if (k == tx_ring->count)
                                k = 0;
                }
-               ew32(TDT, k);
+               ew32(TDT(0), k);
                e1e_flush();
                msleep(200);
                time = jiffies; /* set the start time for the receive */
index f82ecf536c8b4ec42e836852dc889841fa81316e..923d3fd6ce119e835d46599a013f58d0f87701a6 100644 (file)
@@ -94,31 +94,40 @@ enum e1e_registers {
        E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
        E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
        E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
-       E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
-       E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
-       E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
-       E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
-       E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
-       E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
-       E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
-#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
-       E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
-
-/* Convenience macros
+/*
+ * Convenience macros
  *
  * Note: "_n" is the queue number of the register to be written to.
  *
  * Example usage:
- * E1000_RDBAL_REG(current_rx_queue)
- *
+ * E1000_RDBAL(current_rx_queue)
  */
-#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
+       E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
+#define E1000_RDBAL(_n)        (E1000_RDBAL_BASE + (_n << 8))
+       E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
+#define E1000_RDBAH(_n)        (E1000_RDBAH_BASE + (_n << 8))
+       E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
+#define E1000_RDLEN(_n)        (E1000_RDLEN_BASE + (_n << 8))
+       E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
+#define E1000_RDH(_n)  (E1000_RDH_BASE + (_n << 8))
+       E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
+#define E1000_RDT(_n)  (E1000_RDT_BASE + (_n << 8))
+       E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
+       E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
+#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
+       E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
+
        E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
-       E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
-       E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
-       E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
-       E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
-       E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
+       E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
+#define E1000_TDBAL(_n)        (E1000_TDBAL_BASE + (_n << 8))
+       E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
+#define E1000_TDBAH(_n)        (E1000_TDBAH_BASE + (_n << 8))
+       E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
+#define E1000_TDLEN(_n)        (E1000_TDLEN_BASE + (_n << 8))
+       E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
+#define E1000_TDH(_n)  (E1000_TDH_BASE + (_n << 8))
+       E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
+#define E1000_TDT(_n)  (E1000_TDT_BASE + (_n << 8))
        E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
        E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
 #define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
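For reference, the layout these macros encode: each descriptor queue's copy of a register sits 0x100 bytes past the previous queue's, so queue n resolves to the queue-0 base plus (n << 8). A minimal userspace sketch of the arithmetic, reusing the base offsets from the enum above (the main() driver is purely illustrative):

#include <stdio.h>

/* Base offsets copied from the enum above. */
#define E1000_RDT_BASE  0x02818
#define E1000_TDT_BASE  0x03818

/* Queue-indexed register: queue n sits 0x100 bytes past queue n-1. */
#define E1000_RDT(_n)   (E1000_RDT_BASE + ((_n) << 8))
#define E1000_TDT(_n)   (E1000_TDT_BASE + ((_n) << 8))

int main(void)
{
        /* Queue 0 resolves to the legacy single-queue offsets. */
        printf("RDT(0) = 0x%05x\n", E1000_RDT(0));   /* 0x02818 */
        printf("RDT(1) = 0x%05x\n", E1000_RDT(1));   /* 0x02918 */
        printf("TDT(2) = 0x%05x\n", E1000_TDT(2));   /* 0x03a18 */
        return 0;
}

All call sites converted in this series pass queue 0, so the computed offsets match the old single-queue constants and behaviour is unchanged.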
index 19ab2154802c171fcadaeceb82419681b51c90f2..851f7937db29d8e094e3dc4c71d8571dea82401b 100644 (file)
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
+#define DRV_VERSION "1.10.6" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -110,14 +110,14 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 
        /* Rx Registers */
        {E1000_RCTL, "RCTL"},
-       {E1000_RDLEN, "RDLEN"},
-       {E1000_RDH, "RDH"},
-       {E1000_RDT, "RDT"},
+       {E1000_RDLEN(0), "RDLEN"},
+       {E1000_RDH(0), "RDH"},
+       {E1000_RDT(0), "RDT"},
        {E1000_RDTR, "RDTR"},
        {E1000_RXDCTL(0), "RXDCTL"},
        {E1000_ERT, "ERT"},
-       {E1000_RDBAL, "RDBAL"},
-       {E1000_RDBAH, "RDBAH"},
+       {E1000_RDBAL(0), "RDBAL"},
+       {E1000_RDBAH(0), "RDBAH"},
        {E1000_RDFH, "RDFH"},
        {E1000_RDFT, "RDFT"},
        {E1000_RDFHS, "RDFHS"},
@@ -126,11 +126,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 
        /* Tx Registers */
        {E1000_TCTL, "TCTL"},
-       {E1000_TDBAL, "TDBAL"},
-       {E1000_TDBAH, "TDBAH"},
-       {E1000_TDLEN, "TDLEN"},
-       {E1000_TDH, "TDH"},
-       {E1000_TDT, "TDT"},
+       {E1000_TDBAL(0), "TDBAL"},
+       {E1000_TDBAH(0), "TDBAH"},
+       {E1000_TDLEN(0), "TDLEN"},
+       {E1000_TDH(0), "TDH"},
+       {E1000_TDT(0), "TDT"},
        {E1000_TIDV, "TIDV"},
        {E1000_TXDCTL(0), "TXDCTL"},
        {E1000_TADV, "TADV"},
@@ -1053,7 +1053,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
 
        if (!adapter->tx_hang_recheck &&
            (adapter->flags2 & FLAG2_DMA_BURST)) {
-               /* May be block on write-back, flush and detect again
+               /*
+                * May be blocked on write-back, flush and detect again
                 * flush pending descriptor writebacks to memory
                 */
                ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -2530,33 +2531,31 @@ err:
 }
 
 /**
- * e1000_clean - NAPI Rx polling callback
+ * e1000e_poll - NAPI Rx polling callback
  * @napi: struct associated with this polling callback
- * @budget: amount of packets driver is allowed to process this poll
+ * @weight: number of packets driver is allowed to process this poll
  **/
-static int e1000_clean(struct napi_struct *napi, int budget)
+static int e1000e_poll(struct napi_struct *napi, int weight)
 {
-       struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+       struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+                                                    napi);
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *poll_dev = adapter->netdev;
        int tx_cleaned = 1, work_done = 0;
 
        adapter = netdev_priv(poll_dev);
 
-       if (adapter->msix_entries &&
-           !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
-               goto clean_rx;
-
-       tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
+       if (!adapter->msix_entries ||
+           (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+               tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
 
-clean_rx:
-       adapter->clean_rx(adapter->rx_ring, &work_done, budget);
+       adapter->clean_rx(adapter->rx_ring, &work_done, weight);
 
        if (!tx_cleaned)
-               work_done = budget;
+               work_done = weight;
 
-       /* If budget not fully consumed, exit the polling mode */
-       if (work_done < budget) {
+       /* If weight not fully consumed, exit the polling mode */
+       if (work_done < weight) {
                if (adapter->itr_setting & 3)
                        e1000_set_itr(adapter);
                napi_complete(napi);
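The renamed poll routine keeps the usual NAPI contract: clean at most 'weight' packets, and only leave polling mode (napi_complete plus re-enabling interrupts) when less than the full weight was consumed. A small standalone model of that decision, with plain integers standing in for the kernel API (all names below are illustrative, not the driver's):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative model of one NAPI poll round: 'pending' packets are
 * waiting and at most 'weight' may be cleaned in this pass. */
static int poll_once(int pending, int weight, bool *stay_polling)
{
        int work_done = pending < weight ? pending : weight;

        /* Consuming the full weight means "more work may remain":
         * stay in polling mode and let the scheduler call us again. */
        *stay_polling = (work_done == weight);
        return work_done;
}

int main(void)
{
        bool again;
        int done = poll_once(100, 64, &again);
        printf("done=%d stay_polling=%d\n", done, again);  /* 64, 1 */
        done = poll_once(10, 64, &again);
        printf("done=%d stay_polling=%d\n", done, again);  /* 10, 0 */
        return 0;
}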
@@ -2800,13 +2799,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        /* Setup the HW Tx Head and Tail descriptor pointers */
        tdba = tx_ring->dma;
        tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
-       ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
-       ew32(TDBAH, (tdba >> 32));
-       ew32(TDLEN, tdlen);
-       ew32(TDH, 0);
-       ew32(TDT, 0);
-       tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
-       tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
+       ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+       ew32(TDBAH(0), (tdba >> 32));
+       ew32(TDLEN(0), tdlen);
+       ew32(TDH(0), 0);
+       ew32(TDT(0), 0);
+       tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
+       tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
        /* Set the Tx Interrupt Delay register */
        ew32(TIDV, adapter->tx_int_delay);
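The TDBAL/TDBAH writes just split the ring's 64-bit DMA base address into its low and high 32-bit halves, and the Rx hunk below does the same with RDBAL/RDBAH. A quick userspace sketch of the masking and shifting (the address value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t tdba = 0x00000001234ab000ULL;  /* assumed ring DMA address */

        uint32_t tdbal = (uint32_t)(tdba & 0xffffffffULL); /* low 32 bits  */
        uint32_t tdbah = (uint32_t)(tdba >> 32);           /* high 32 bits */

        printf("TDBAL=0x%08x TDBAH=0x%08x\n", tdbal, tdbah);
        /* TDBAL=0x234ab000 TDBAH=0x00000001 */
        return 0;
}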
@@ -3110,13 +3109,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
         * the Base and Length of the Rx Descriptor Ring
         */
        rdba = rx_ring->dma;
-       ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
-       ew32(RDBAH, (rdba >> 32));
-       ew32(RDLEN, rdlen);
-       ew32(RDH, 0);
-       ew32(RDT, 0);
-       rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
-       rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
+       ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
+       ew32(RDBAH(0), (rdba >> 32));
+       ew32(RDLEN(0), rdlen);
+       ew32(RDH(0), 0);
+       ew32(RDT(0), 0);
+       rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
+       rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
@@ -6226,7 +6225,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        netdev->netdev_ops              = &e1000e_netdev_ops;
        e1000e_set_ethtool_ops(netdev);
        netdev->watchdog_timeo          = 5 * HZ;
-       netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+       netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
        strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
        netdev->mem_start = mmio_start;
index 6565c463185c2a8f705ceee712005643f9a7389d..4bd16e2664145babd34f9b8751bada43b67a5ba0 100644 (file)
@@ -35,3 +35,4 @@ obj-$(CONFIG_IGB) += igb.o
 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
            e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
 
+igb-$(CONFIG_IGB_PTP) += igb_ptp.o
index 8e33bdd33eea5663c208dc6db16ca7f5371102ae..3758ad2467420371b3ab689ead0a4e0a45a0dc47 100644 (file)
@@ -35,8 +35,8 @@
 #include "e1000_82575.h"
 
 #include <linux/clocksource.h>
-#include <linux/timecompare.h>
 #include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -328,9 +328,6 @@ struct igb_adapter {
 
        /* OS defined structs */
        struct pci_dev *pdev;
-       struct cyclecounter cycles;
-       struct timecounter clock;
-       struct timecompare compare;
        struct hwtstamp_config hwtstamp_config;
 
        spinlock_t stats64_lock;
@@ -364,6 +361,13 @@ struct igb_adapter {
        u32 wvbr;
        int node;
        u32 *shadow_vfta;
+
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info caps;
+       struct delayed_work overflow_work;
+       spinlock_t tmreg_lock;
+       struct cyclecounter cc;
+       struct timecounter tc;
 };
 
 #define IGB_FLAG_HAS_MSI           (1 << 0)
@@ -378,7 +382,6 @@ struct igb_adapter {
 #define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */
 
 #define IGB_82576_TSYNC_SHIFT 19
-#define IGB_82580_TSYNC_SHIFT 24
 #define IGB_TS_HDR_LEN        16
 enum e1000_state_t {
        __IGB_TESTING,
@@ -414,7 +417,15 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
+#ifdef CONFIG_IGB_PTP
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_remove(struct igb_adapter *adapter);
+
+extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                  struct skb_shared_hwtstamps *hwtstamps,
+                                  u64 systim);
 
+#endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
        if (hw->phy.ops.reset)
index 5ec31598ee4775708094fabcd3e1775325e65dfa..f022ff7900f742f8a81b2e7c0e47e69409fbe0d9 100644 (file)
@@ -114,7 +114,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
-static void igb_init_hw_timer(struct igb_adapter *adapter);
 static int igb_sw_init(struct igb_adapter *);
 static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
@@ -565,33 +564,6 @@ exit:
        return;
 }
 
-
-/**
- * igb_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t igb_read_clock(const struct cyclecounter *tc)
-{
-       struct igb_adapter *adapter =
-               container_of(tc, struct igb_adapter, cycles);
-       struct e1000_hw *hw = &adapter->hw;
-       u64 stamp = 0;
-       int shift = 0;
-
-       /*
-        * The timestamp latches on lowest register read. For the 82580
-        * the lowest register is SYSTIMR instead of SYSTIML.  However we never
-        * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
-        */
-       if (hw->mac.type >= e1000_82580) {
-               stamp = rd32(E1000_SYSTIMR) >> 8;
-               shift = IGB_82580_TSYNC_SHIFT;
-       }
-
-       stamp |= (u64)rd32(E1000_SYSTIML) << shift;
-       stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
-       return stamp;
-}
-
 /**
  * igb_get_hw_dev - return device
  * used by hardware layer to print debugging information
@@ -2110,9 +2082,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        }
 
 #endif
+#ifdef CONFIG_IGB_PTP
        /* do hw tstamp init after resetting */
-       igb_init_hw_timer(adapter);
+       igb_ptp_init(adapter);
 
+#endif
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2184,7 +2158,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        struct e1000_hw *hw = &adapter->hw;
 
        pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_PTP
+       igb_ptp_remove(adapter);
 
+#endif
        /*
         * The watchdog timer may be rescheduled, so explicitly
         * disable watchdog from being rescheduled.
@@ -2303,112 +2280,6 @@ out:
 #endif /* CONFIG_PCI_IOV */
 }
 
-/**
- * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
- * @adapter: board private structure to initialize
- *
- * igb_init_hw_timer initializes the function pointer and values for the hw
- * timer found in hardware.
- **/
-static void igb_init_hw_timer(struct igb_adapter *adapter)
-{
-       struct e1000_hw *hw = &adapter->hw;
-
-       switch (hw->mac.type) {
-       case e1000_i350:
-       case e1000_82580:
-               memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-               adapter->cycles.read = igb_read_clock;
-               adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-               adapter->cycles.mult = 1;
-               /*
-                * The 82580 timesync updates the system timer every 8ns by 8ns
-                * and the value cannot be shifted.  Instead we need to shift
-                * the registers to generate a 64bit timer value.  As a result
-                * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
-                * 24 in order to generate a larger value for synchronization.
-                */
-               adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
-               /* disable system timer temporarily by setting bit 31 */
-               wr32(E1000_TSAUXC, 0x80000000);
-               wrfl();
-
-               /* Set registers so that rollover occurs soon to test this. */
-               wr32(E1000_SYSTIMR, 0x00000000);
-               wr32(E1000_SYSTIML, 0x80000000);
-               wr32(E1000_SYSTIMH, 0x000000FF);
-               wrfl();
-
-               /* enable system timer by clearing bit 31 */
-               wr32(E1000_TSAUXC, 0x0);
-               wrfl();
-
-               timecounter_init(&adapter->clock,
-                                &adapter->cycles,
-                                ktime_to_ns(ktime_get_real()));
-               /*
-                * Synchronize our NIC clock against system wall clock. NIC
-                * time stamp reading requires ~3us per sample, each sample
-                * was pretty stable even under load => only require 10
-                * samples for each offset comparison.
-                */
-               memset(&adapter->compare, 0, sizeof(adapter->compare));
-               adapter->compare.source = &adapter->clock;
-               adapter->compare.target = ktime_get_real;
-               adapter->compare.num_samples = 10;
-               timecompare_update(&adapter->compare, 0);
-               break;
-       case e1000_82576:
-               /*
-                * Initialize hardware timer: we keep it running just in case
-                * that some program needs it later on.
-                */
-               memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-               adapter->cycles.read = igb_read_clock;
-               adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-               adapter->cycles.mult = 1;
-               /**
-                * Scale the NIC clock cycle by a large factor so that
-                * relatively small clock corrections can be added or
-                * subtracted at each clock tick. The drawbacks of a large
-                * factor are a) that the clock register overflows more quickly
-                * (not such a big deal) and b) that the increment per tick has
-                * to fit into 24 bits.  As a result we need to use a shift of
-                * 19 so we can fit a value of 16 into the TIMINCA register.
-                */
-               adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
-               wr32(E1000_TIMINCA,
-                               (1 << E1000_TIMINCA_16NS_SHIFT) |
-                               (16 << IGB_82576_TSYNC_SHIFT));
-
-               /* Set registers so that rollover occurs soon to test this. */
-               wr32(E1000_SYSTIML, 0x00000000);
-               wr32(E1000_SYSTIMH, 0xFF800000);
-               wrfl();
-
-               timecounter_init(&adapter->clock,
-                                &adapter->cycles,
-                                ktime_to_ns(ktime_get_real()));
-               /*
-                * Synchronize our NIC clock against system wall clock. NIC
-                * time stamp reading requires ~3us per sample, each sample
-                * was pretty stable even under load => only require 10
-                * samples for each offset comparison.
-                */
-               memset(&adapter->compare, 0, sizeof(adapter->compare));
-               adapter->compare.source = &adapter->clock;
-               adapter->compare.target = ktime_get_real;
-               adapter->compare.num_samples = 10;
-               timecompare_update(&adapter->compare, 0);
-               break;
-       case e1000_82575:
-               /* 82575 does not support timesync */
-       default:
-               break;
-       }
-
-}
-
 /**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
@@ -5718,35 +5589,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
-/**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @shhwtstamps: timestamp structure to update
- * @regval: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions
- */
-static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                                   struct skb_shared_hwtstamps *shhwtstamps,
-                                   u64 regval)
-{
-       u64 ns;
-
-       /*
-        * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
-        * 24 to match clock shift we setup earlier.
-        */
-       if (adapter->hw.mac.type >= e1000_82580)
-               regval <<= IGB_82580_TSYNC_SHIFT;
-
-       ns = timecounter_cyc2time(&adapter->clock, regval);
-       timecompare_update(&adapter->compare, ns);
-       memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-       shhwtstamps->hwtstamp = ns_to_ktime(ns);
-       shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
-}
-
+#ifdef CONFIG_IGB_PTP
 /**
  * igb_tx_hwtstamp - utility function which checks for TX time stamp
  * @q_vector: pointer to q_vector containing needed info
@@ -5776,6 +5619,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
        skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
 }
 
+#endif
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
@@ -5819,9 +5663,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
 
+#ifdef CONFIG_IGB_PTP
                /* retrieve hardware timestamp */
                igb_tx_hwtstamp(q_vector, tx_buffer);
 
+#endif
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
                tx_buffer->skb = NULL;
@@ -5993,6 +5839,7 @@ static inline void igb_rx_hash(struct igb_ring *ring,
                skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
+#ifdef CONFIG_IGB_PTP
 static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
                            union e1000_adv_rx_desc *rx_desc,
                            struct sk_buff *skb)
@@ -6032,6 +5879,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
        igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
 
+#endif
 static void igb_rx_vlan(struct igb_ring *ring,
                        union e1000_adv_rx_desc *rx_desc,
                        struct sk_buff *skb)
@@ -6142,7 +5990,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
                        goto next_desc;
                }
 
+#ifdef CONFIG_IGB_PTP
                igb_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif
                igb_rx_hash(rx_ring, rx_desc, skb);
                igb_rx_checksum(rx_ring, rx_desc, skb);
                igb_rx_vlan(rx_ring, rx_desc, skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644 (file)
index 0000000..c9b71c5
--- /dev/null
@@ -0,0 +1,381 @@
+/*
+ * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
+ *
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "igb.h"
+
+#define INCVALUE_MASK          0x7fffffff
+#define ISGN                   0x80000000
+
+/*
+ * The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offers registers wide enough to hold
+ * nanosecond time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits.  As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
+ *
+ *
+ *             SYSTIMH            SYSTIML
+ *        +--------------+   +---+---+------+
+ *  82576 |      32      |   | 8 | 5 |  19  |
+ *        +--------------+   +---+---+------+
+ *         \________ 45 bits _______/  fract
+ *
+ *        +----------+---+   +--------------+
+ *  82580 |    24    | 8 |   |      32      |
+ *        +----------+---+   +--------------+
+ *          reserved  \______ 40 bits _____/
+ *
+ *
+ * The 45 bit 82576 SYSTIM overflows every
+ *   2^45 * 10^-9 / 3600 = 9.77 hours.
+ *
+ * The 40 bit 82580 SYSTIM overflows every
+ *   2^40 * 10^-9 /  60  = 18.3 minutes.
+ */
+
+#define IGB_OVERFLOW_PERIOD    (HZ * 60 * 9)
+#define INCPERIOD_82576                (1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK    ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580                40
+
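The wrap-around figures in the comment above are easy to verify, and they motivate IGB_OVERFLOW_PERIOD: running the delayed work every HZ * 60 * 9 jiffies (9 minutes) keeps the software timecounter well inside the 18.3-minute 82580 wrap, so no overflow is ever missed. A quick check of the arithmetic in plain C:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 82576: 45 usable SYSTIM bits of whole nanoseconds. */
        double hours_82576 = (double)(1ULL << 45) * 1e-9 / 3600.0;
        /* 82580: 40 usable SYSTIM bits of whole nanoseconds. */
        double mins_82580  = (double)(1ULL << 40) * 1e-9 / 60.0;

        printf("82576 wraps every %.2f hours\n", hours_82576);  /* ~9.77  */
        printf("82580 wraps every %.2f minutes\n", mins_82580); /* ~18.33 */

        /* The driver re-reads the clock every 9 minutes, which is
         * comfortably less than half of the shortest wrap period. */
        return 0;
}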
+/*
+ * SYSTIM read access for the 82576
+ */
+
+static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
+{
+       u64 val;
+       u32 lo, hi;
+       struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+       struct e1000_hw *hw = &igb->hw;
+
+       lo = rd32(E1000_SYSTIML);
+       hi = rd32(E1000_SYSTIMH);
+
+       val = ((u64) hi) << 32;
+       val |= lo;
+
+       return val;
+}
+
+/*
+ * SYSTIM read access for the 82580
+ */
+
+static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
+{
+       u64 val;
+       u32 lo, hi, jk;
+       struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+       struct e1000_hw *hw = &igb->hw;
+
+       /*
+        * The timestamp latches on lowest register read. For the 82580
+        * the lowest register is SYSTIMR instead of SYSTIML.  However we only
+        * need to provide nanosecond resolution, so we just ignore it.
+        */
+       jk = rd32(E1000_SYSTIMR);
+       lo = rd32(E1000_SYSTIML);
+       hi = rd32(E1000_SYSTIMH);
+
+       val = ((u64) hi) << 32;
+       val |= lo;
+
+       return val;
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       u64 rate;
+       u32 incvalue;
+       int neg_adj = 0;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+       struct e1000_hw *hw = &igb->hw;
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+       rate = ppb;
+       rate <<= 14;
+       rate = div_u64(rate, 1953125);
+
+       incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+
+       if (neg_adj)
+               incvalue -= rate;
+       else
+               incvalue += rate;
+
+       wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
+
+       return 0;
+}
+
+static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       u64 rate;
+       u32 inca;
+       int neg_adj = 0;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+       struct e1000_hw *hw = &igb->hw;
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+       rate = ppb;
+       rate <<= 26;
+       rate = div_u64(rate, 1953125);
+
+       inca = rate & INCVALUE_MASK;
+       if (neg_adj)
+               inca |= ISGN;
+
+       wr32(E1000_TIMINCA, inca);
+
+       return 0;
+}
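The two adjfreq scalings follow from the register formats described above. For the 82576 the nominal increment is 16 << 19, so a parts-per-billion change to it is ppb * (16 << 19) / 10^9 = ppb * 2^23 / 10^9; since 10^9 = 2^9 * 1953125, that is exactly (ppb << 14) / 1953125. For the 82580, (ppb << 26) / 1953125 equals ppb * 8 * 2^32 / 10^9, i.e. the requested fraction of the fixed 8 ns tick expressed in the 2^-32 ns units of the fractional-nanoseconds TIMINCA register. A standalone check of both identities (the sample ppb is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long long ppb = 12345;         /* arbitrary positive sample */

        /* 82576: fractional change to the nominal 16 << 19 increment. */
        unsigned long long r76_code = (ppb << 14) / 1953125;
        unsigned long long r76_math = ppb * (1ULL << 23) / 1000000000ULL;

        /* 82580: extra fractional nanoseconds (2^-32 ns) per 8 ns tick. */
        unsigned long long r80_code = (ppb << 26) / 1953125;
        unsigned long long r80_math = ppb * (1ULL << 35) / 1000000000ULL;

        printf("82576: %llu == %llu\n", r76_code, r76_math);
        printf("82580: %llu == %llu\n", r80_code, r80_math);
        return 0;
}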
+
+static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       s64 now;
+       unsigned long flags;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       now = timecounter_read(&igb->tc);
+       now += delta;
+       timecounter_init(&igb->tc, &igb->cc, now);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
+}
+
+static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       u64 ns;
+       u32 remainder;
+       unsigned long flags;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       ns = timecounter_read(&igb->tc);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+       ts->tv_nsec = remainder;
+
+       return 0;
+}
+
+static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
+{
+       u64 ns;
+       unsigned long flags;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+       ns = ts->tv_sec * 1000000000ULL;
+       ns += ts->tv_nsec;
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       timecounter_init(&igb->tc, &igb->cc, ns);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
+}
+
+static int ptp_82576_enable(struct ptp_clock_info *ptp,
+                           struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static int ptp_82580_enable(struct ptp_clock_info *ptp,
+                           struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static void igb_overflow_check(struct work_struct *work)
+{
+       struct timespec ts;
+       struct igb_adapter *igb =
+               container_of(work, struct igb_adapter, overflow_work.work);
+
+       igb_gettime(&igb->caps, &ts);
+
+       pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+       schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
+}
+
+void igb_ptp_init(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       switch (hw->mac.type) {
+       case e1000_i350:
+       case e1000_82580:
+               adapter->caps.owner     = THIS_MODULE;
+               strcpy(adapter->caps.name, "igb-82580");
+               adapter->caps.max_adj   = 62499999;
+               adapter->caps.n_ext_ts  = 0;
+               adapter->caps.pps       = 0;
+               adapter->caps.adjfreq   = ptp_82580_adjfreq;
+               adapter->caps.adjtime   = igb_adjtime;
+               adapter->caps.gettime   = igb_gettime;
+               adapter->caps.settime   = igb_settime;
+               adapter->caps.enable    = ptp_82580_enable;
+               adapter->cc.read        = igb_82580_systim_read;
+               adapter->cc.mask        = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+               adapter->cc.mult        = 1;
+               adapter->cc.shift       = 0;
+               /* Enable the timer functions by clearing bit 31. */
+               wr32(E1000_TSAUXC, 0x0);
+               break;
+
+       case e1000_82576:
+               adapter->caps.owner     = THIS_MODULE;
+               strcpy(adapter->caps.name, "igb-82576");
+               adapter->caps.max_adj   = 1000000000;
+               adapter->caps.n_ext_ts  = 0;
+               adapter->caps.pps       = 0;
+               adapter->caps.adjfreq   = ptp_82576_adjfreq;
+               adapter->caps.adjtime   = igb_adjtime;
+               adapter->caps.gettime   = igb_gettime;
+               adapter->caps.settime   = igb_settime;
+               adapter->caps.enable    = ptp_82576_enable;
+               adapter->cc.read        = igb_82576_systim_read;
+               adapter->cc.mask        = CLOCKSOURCE_MASK(64);
+               adapter->cc.mult        = 1;
+               adapter->cc.shift       = IGB_82576_TSYNC_SHIFT;
+               /* Dial the nominal frequency. */
+               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+               break;
+
+       default:
+               adapter->ptp_clock = NULL;
+               return;
+       }
+
+       wrfl();
+
+       timecounter_init(&adapter->tc, &adapter->cc,
+                        ktime_to_ns(ktime_get_real()));
+
+       INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
+
+       spin_lock_init(&adapter->tmreg_lock);
+
+       schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
+
+       adapter->ptp_clock = ptp_clock_register(&adapter->caps);
+       if (IS_ERR(adapter->ptp_clock)) {
+               adapter->ptp_clock = NULL;
+               dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+       } else
+               dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+                        adapter->netdev->name);
+}
+
+void igb_ptp_remove(struct igb_adapter *adapter)
+{
+       cancel_delayed_work_sync(&adapter->overflow_work);
+
+       if (adapter->ptp_clock) {
+               ptp_clock_unregister(adapter->ptp_clock);
+               dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+                        adapter->netdev->name);
+       }
+}
+
+/**
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, we have extended the system time with an overflow
+ * counter in software.
+ **/
+void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+                           struct skb_shared_hwtstamps *hwtstamps,
+                           u64 systim)
+{
+       u64 ns;
+       unsigned long flags;
+
+       switch (adapter->hw.mac.type) {
+       case e1000_i350:
+       case e1000_82580:
+       case e1000_82576:
+               break;
+       default:
+               return;
+       }
+
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+       ns = timecounter_cyc2time(&adapter->tc, systim);
+
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       memset(hwtstamps, 0, sizeof(*hwtstamps));
+       hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
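Because both variants program cc.mult = 1, the timecounter's cycles-to-nanoseconds step reduces to a right shift: shift 0 on the 82580, whose SYSTIM already counts whole nanoseconds, and shift 19 on the 82576, matching its 19 fractional bits. A minimal model of that conversion (not the kernel's timecounter code, just the same arithmetic):

#include <stdio.h>
#include <stdint.h>

/* Cycles-to-ns conversion at the heart of the cyclecounter:
 * ns = (cycles * mult) >> shift.  Both igb variants use mult = 1. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        /* 82576: 19 fractional bits, so 1 ns == 1 << 19 raw counts. */
        uint64_t one_us_82576 = 1000ULL << 19;
        printf("82576: %llu raw -> %llu ns\n",
               (unsigned long long)one_us_82576,
               (unsigned long long)cyc2ns(one_us_82576, 1, 19)); /* 1000 */

        /* 82580: SYSTIM already counts whole nanoseconds (shift 0). */
        printf("82580: 1000 raw -> %llu ns\n",
               (unsigned long long)cyc2ns(1000, 1, 0));          /* 1000 */
        return 0;
}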
index 85d2e2c4ce4a9b8d55fa69a0b010b291cb6d97f6..56fd46844f656e07c6feb3e244cb574061ab0452 100644 (file)
@@ -91,29 +91,6 @@ out:
        IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
 }
 
-/**
- *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
-       struct ixgbe_adapter *adapter = hw->back;
-       u16 msix_count;
-       pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
-                            &msix_count);
-       msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-       /* MSI-X count is zero-based in HW, so increment to give proper value */
-       msix_count++;
-
-       return msix_count;
-}
-
-/**
- */
 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
@@ -126,7 +103,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
        mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
        mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
        mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 
        return 0;
 }
index 49aa41fe7b84411524ac60713686dbef619d59ba..e59888163a17050cdbd05ab7980b5f82e8c61473 100644 (file)
@@ -2783,17 +2783,36 @@ san_mac_addr_out:
  *  Read PCIe configuration space, and get the MSI-X vector count from
  *  the capabilities table.
  **/
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
        struct ixgbe_adapter *adapter = hw->back;
-       u16 msix_count;
-       pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
-                            &msix_count);
+       u16 msix_count = 1;
+       u16 max_msix_count;
+       u16 pcie_offset;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+               max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+               max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+               break;
+       default:
+               return msix_count;
+       }
+
+       pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
        msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
 
-       /* MSI-X count is zero-based in HW, so increment to give proper value */
+       /* MSI-X count is zero-based in HW */
        msix_count++;
 
+       if (msix_count > max_msix_count)
+               msix_count = max_msix_count;
+
        return msix_count;
 }
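The generic helper reads the MSI-X capability word from config space, masks the 11-bit table-size field, converts the zero-based value to a count, and clamps it against the per-MAC maximums added to ixgbe_type.h below (0x13 for 82598, 0x40 for 82599/X540). A standalone sketch of that sequence with made-up config-space values:

#include <stdio.h>
#include <stdint.h>

#define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
#define IXGBE_MAX_MSIX_VECTORS_82599    0x40
#define IXGBE_MAX_MSIX_VECTORS_82598    0x13

/* Mirror of the helper's math: 'caps_word' stands in for the word
 * read from PCI config space at the MSI-X capability offset. */
static uint16_t msix_count(uint16_t caps_word, uint16_t max)
{
        uint16_t count = caps_word & IXGBE_PCIE_MSIX_TBL_SZ_MASK;

        count++;                /* field is zero-based in hardware */
        if (count > max)        /* never report more vectors than the MAC has */
                count = max;
        return count;
}

int main(void)
{
        /* A device advertising a 64-entry table (field value 63). */
        printf("82599: %u vectors\n",
               msix_count(0x003f, IXGBE_MAX_MSIX_VECTORS_82599));  /* 64 */
        /* A bogus, over-large field gets clamped for the 82598. */
        printf("82598: %u vectors\n",
               msix_count(0x00ff, IXGBE_MAX_MSIX_VECTORS_82598));  /* 19 */
        return 0;
}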
 
index 204f06235b455ce8a0d0c803471001f9c6a986f9..d6d34324540cfa2af051a41b47c50858e239719e 100644 (file)
@@ -31,7 +31,7 @@
 #include "ixgbe_type.h"
 #include "ixgbe.h"
 
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
index 8636e8344fc943bbafe10eae48be83eff760f6cc..ffa6679e943bb60a586279bdd7a119cf657069d6 100644 (file)
@@ -1681,7 +1681,9 @@ enum {
 #define IXGBE_DEVICE_CAPS       0x2C
 #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
 #define IXGBE_PCIE_MSIX_82599_CAPS  0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599   0x40
 #define IXGBE_PCIE_MSIX_82598_CAPS  0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598   0x13
 
 /* MSI-X capability fields masks */
 #define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
@@ -2813,6 +2815,7 @@ struct ixgbe_mac_info {
        u16                             wwnn_prefix;
        /* prefix for World Wide Port Name (WWPN) */
        u16                             wwpn_prefix;
+       u16                             max_msix_vectors;
 #define IXGBE_MAX_MTA                  128
        u32                             mta_shadow[IXGBE_MAX_MTA];
        s32                             mc_filter_type;
@@ -2823,7 +2826,6 @@ struct ixgbe_mac_info {
        u32                             rx_pb_size;
        u32                             max_tx_queues;
        u32                             max_rx_queues;
-       u32                             max_msix_vectors;
        u32                             orig_autoc;
        u32                             orig_autoc2;
        bool                            orig_link_settings_stored;
index 5e1ca0f0509098dd6e571b289df35f8886d7b21d..c8950da60e6ba981a401365e431009bfc338834c 100644 (file)
@@ -1665,6 +1665,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
        .get_strings            = mv643xx_eth_get_strings,
        .get_ethtool_stats      = mv643xx_eth_get_ethtool_stats,
        .get_sset_count         = mv643xx_eth_get_sset_count,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 
index efec6b60b3276e891b9e2930e237e810e4311b27..1db023b075a10a0889b0e54799a88debed0de3ad 100644 (file)
@@ -1456,6 +1456,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
        .set_settings = pxa168_set_settings,
        .get_drvinfo = pxa168_get_drvinfo,
        .get_link = ethtool_op_get_link,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops pxa168_eth_netdev_ops = {
index c9b504e2dfc3bfdc4707dd0f3db240b1f365ec0c..7732474263da66d6d479f6f216ca99f376d99583 100644 (file)
@@ -4816,14 +4816,14 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
 
        init_waitqueue_head(&hw->msi_wait);
 
-       sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
-
        err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
        if (err) {
                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
                return err;
        }
 
+       sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
        sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
        sky2_read8(hw, B0_CTST);
 
index 1bb93531f1ba0e33ecc448bc38aaec91005de137..5f027f95cc8422fe6887cf41b42c2572b6f809e9 100644 (file)
@@ -11,6 +11,18 @@ config MLX4_EN
          This driver supports Mellanox Technologies ConnectX Ethernet
          devices.
 
+config MLX4_EN_DCB
+       bool "Data Center Bridging (DCB) Support"
+       default y
+       depends on MLX4_EN && DCB
+       ---help---
+         Say Y here if you want to use Data Center Bridging (DCB) in the
+         driver.
+         If set to N, you will not be able to configure QoS and rate-limit
+         attributes. This option depends on the kernel's DCB support.
+
+         If unsure, set to Y.
+
 config MLX4_CORE
        tristate
        depends on PCI
index 4a40ab967eeb806d508d14c79c04503dc26ac5e4..293127d28b33eec76ba2edcd6ee147695db0a119 100644 (file)
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
 mlx4_en-y :=   en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
                en_resources.o en_netdev.o en_selftest.o
+mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
new file mode 100644 (file)
index 0000000..0cc6c96
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/dcbnl.h>
+
+#include "mlx4_en.h"
+
+static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
+                                  struct ieee_ets *ets)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct ieee_ets *my_ets = &priv->ets;
+
+       /* No IEEE PFC settings available */
+       if (!my_ets)
+               return -EINVAL;
+
+       ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+       ets->cbs = my_ets->cbs;
+       memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+       memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+
+       return 0;
+}
+
+static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
+{
+       int i;
+       int total_ets_bw = 0;
+       int has_ets_tc = 0;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+                       en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
+                                       i, ets->prio_tc[i]);
+                       return -EINVAL;
+               }
+
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       has_ets_tc = 1;
+                       total_ets_bw += ets->tc_tx_bw[i];
+                       break;
+               default:
+                       en_err(priv, "TC[%d]: Not supported TSA: %d\n",
+                                       i, ets->tc_tsa[i]);
+                       return -ENOTSUPP;
+               }
+       }
+
+       if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
+               en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
+                               total_ets_bw);
+               return -EINVAL;
+       }
+
+       return 0;
+}
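The validation rule is that strict-priority TCs carry no bandwidth share, while the shares of all ETS TCs must sum to exactly MLX4_EN_BW_MAX (100%). A small standalone model of that check (constants and sample values are illustrative, not the driver's):

#include <stdio.h>

#define MAX_TCS    8
#define TSA_STRICT 0
#define TSA_ETS    2
#define BW_MAX     100

/* Returns 0 when the configuration is acceptable, -1 otherwise. */
static int validate_ets(const int tsa[MAX_TCS], const int bw[MAX_TCS])
{
        int total = 0, has_ets = 0;

        for (int i = 0; i < MAX_TCS; i++) {
                if (tsa[i] == TSA_ETS) {        /* only ETS TCs count */
                        has_ets = 1;
                        total += bw[i];
                }
        }
        return (has_ets && total != BW_MAX) ? -1 : 0;
}

int main(void)
{
        /* Strict TCs plus ETS TCs sharing 30+30+40 = 100%: valid. */
        int tsa[MAX_TCS] = { TSA_STRICT, TSA_STRICT, TSA_ETS, TSA_ETS,
                             TSA_ETS, TSA_STRICT, TSA_STRICT, TSA_STRICT };
        int bw[MAX_TCS]  = { 0, 0, 30, 30, 40, 0, 0, 0 };

        printf("valid config   -> %d\n", validate_ets(tsa, bw));  /* 0  */
        bw[4] = 50;                     /* sum becomes 110%        */
        printf("invalid config -> %d\n", validate_ets(tsa, bw));  /* -1 */
        return 0;
}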
+
+static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
+               struct ieee_ets *ets, u16 *ratelimit)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int num_strict = 0;
+       int i;
+       __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
+       __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
+
+       ets = ets ?: &priv->ets;
+       ratelimit = ratelimit ?: priv->maxrate;
+
+       /* higher TC means higher priority => lower pg */
+       for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       pg[i] = num_strict++;
+                       tc_tx_bw[i] = MLX4_EN_BW_MAX;
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       pg[i] = MLX4_EN_TC_ETS;
+                       tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
+                       break;
+               }
+       }
+
+       return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
+                       ratelimit);
+}
+
+static int
+mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+
+       err = mlx4_en_ets_validate(priv, ets);
+       if (err)
+               return err;
+
+       err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
+       if (err)
+               return err;
+
+       err = mlx4_en_config_port_scheduler(priv, ets, NULL);
+       if (err)
+               return err;
+
+       memcpy(&priv->ets, ets, sizeof(priv->ets));
+
+       return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
+               struct ieee_pfc *pfc)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+       pfc->pfc_en = priv->prof->tx_ppp;
+
+       return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
+               struct ieee_pfc *pfc)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+
+       en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
+                       pfc->pfc_cap,
+                       pfc->pfc_en,
+                       pfc->mbc,
+                       pfc->delay);
+
+       priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
+       priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+
+       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+                                   priv->prof->tx_pause,
+                                   priv->prof->tx_ppp,
+                                   priv->prof->rx_pause,
+                                   priv->prof->rx_ppp);
+       if (err)
+               en_err(priv, "Failed setting pause params\n");
+
+       return err;
+}
+
+static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
+{
+       return DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+       if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+           (mode & DCB_CAP_DCBX_VER_CEE) ||
+           !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+           !(mode & DCB_CAP_DCBX_HOST))
+               return 1;
+
+       return 0;
+}
+
+#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
+static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
+                                  struct ieee_maxrate *maxrate)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int i;
+
+       if (!priv->maxrate)
+               return -EINVAL;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+               maxrate->tc_maxrate[i] =
+                       priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
+
+       return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
+               struct ieee_maxrate *maxrate)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       u16 tmp[IEEE_8021QAZ_MAX_TCS];
+       int i, err;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               /* Convert from Kbps into HW units, rounding result up.
+                * Setting to 0, means unlimited BW.
+                */
+               tmp[i] =
+                       (maxrate->tc_maxrate[i] + MLX4_RATELIMIT_UNITS_IN_KB -
+                        1) / MLX4_RATELIMIT_UNITS_IN_KB;
+       }
+
+       err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
+       if (err)
+               return err;
+
+       memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+
+       return 0;
+}
+
+const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
+       .ieee_getets    = mlx4_en_dcbnl_ieee_getets,
+       .ieee_setets    = mlx4_en_dcbnl_ieee_setets,
+       .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+       .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
+       .ieee_getpfc    = mlx4_en_dcbnl_ieee_getpfc,
+       .ieee_setpfc    = mlx4_en_dcbnl_ieee_setpfc,
+
+       .getdcbx        = mlx4_en_dcbnl_getdcbx,
+       .setdcbx        = mlx4_en_dcbnl_setdcbx,
+};
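The maxrate path converts between Kbps and hardware units of MLX4_RATELIMIT_UNITS_IN_KB (100,000 Kbps, i.e. 100 Mbps), rounding requests up so that any non-zero rate maps to at least one unit rather than to 0, which means unlimited. A quick standalone check of the rounding:

#include <stdio.h>
#include <stdint.h>

#define UNITS_IN_KB 100000      /* one HW rate-limit unit = 100 Mbps */

/* Kbps -> HW units, rounded up; 0 stays 0 and means "no limit". */
static uint16_t kbps_to_units(uint64_t kbps)
{
        return (uint16_t)((kbps + UNITS_IN_KB - 1) / UNITS_IN_KB);
}

int main(void)
{
        printf("%u\n", kbps_to_units(0));        /* 0  -> unlimited      */
        printf("%u\n", kbps_to_units(1));        /* 1  -> 1 (100 Mbps)   */
        printf("%u\n", kbps_to_units(2500000));  /* 2.5 Gbps -> 25 units */
        printf("%u\n", kbps_to_units(2500001));  /* rounds up to 26      */
        return 0;
}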
index 2097a7d3c5b82dabb482d9b670feef0ec1733d19..346fdb2e92a6bcac3da8ff5d3f2e39d5b7313c8a 100644 (file)
@@ -114,7 +114,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
                params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
                params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
                params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
-                       (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+                       MLX4_EN_NUM_PPP_RINGS;
                params->prof[i].rss_rings = 0;
        }
 
index 31b455a49273f1a3d877f5c7e8cea1515de1963a..35003ada04ec53fbe44c9821f2a2ad707c313f02 100644 (file)
 #include "mlx4_en.h"
 #include "en_port.h"
 
+static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+{
+       if (up != MLX4_EN_NUM_UP)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -650,7 +658,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
                /* Configure ring */
                tx_ring = &priv->tx_ring[i];
-               err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
+               err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
+                               max(0, i - MLX4_EN_NUM_TX_RINGS));
                if (err) {
                        en_err(priv, "Failed allocating Tx ring\n");
                        mlx4_en_deactivate_cq(priv, cq);
@@ -966,6 +975,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mutex_unlock(&mdev->state_lock);
 
        mlx4_en_free_resources(priv);
+
        free_netdev(dev);
 }
 
@@ -1036,6 +1046,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
        .ndo_set_features       = mlx4_en_set_features,
+       .ndo_setup_tc           = mlx4_en_setup_tc,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1079,6 +1090,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+#ifdef CONFIG_MLX4_EN_DCB
+       if (!mlx4_is_slave(priv->mdev->dev))
+               dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+#endif
 
        /* Query for default mac and max mtu */
        priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
@@ -1113,6 +1128,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
+       netdev_set_num_tc(dev, MLX4_EN_NUM_UP);
+
+       /* First 9 rings are for UP 0 */
+       netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);
+
+       /* Partition the remaining Tx queues evenly amongst UPs 1-7 */
+       for (i = 1; i < MLX4_EN_NUM_UP; i++)
+               netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);
+
        SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
 
        /* Set default MAC */
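Taken together with the en_tx.c change further below, the queue layout is: user priority 0 owns the eight default Tx rings plus ring 8, and every other priority p gets one dedicated queue at index MLX4_EN_NUM_TX_RINGS + p, which is the ring mlx4_en_select_queue() returns for a VLAN-tagged frame carrying that priority. A standalone sketch of the mapping (queue numbers only, no mlx4 code):

#include <stdio.h>

#define NUM_TX_RINGS 8          /* MLX4_EN_NUM_TX_RINGS in the driver */
#define NUM_UP       8          /* MLX4_EN_NUM_UP                     */

int main(void)
{
        /* UP 0 owns the default rings 0..7 plus ring 8 (nine queues). */
        printf("UP 0 -> queues 0..%d\n", NUM_TX_RINGS);

        /* Every other UP gets one dedicated queue... */
        for (int up = 1; up < NUM_UP; up++)
                printf("UP %d -> queue %d\n", up, NUM_TX_RINGS + up);

        /* ...which matches the ring chosen for a VLAN-tagged skb:
         * ring = NUM_TX_RINGS + (vlan_tag >> 13), i.e. 8 + PCP. */
        unsigned short vlan_tag = (5 << 13) | 100;  /* PCP 5, VID 100 */
        printf("PCP 5 frame -> queue %d\n", NUM_TX_RINGS + (vlan_tag >> 13));
        return 0;
}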
index 6934fd7e66ed766c987aff56ffbe6661e5667b3f..745090b49d9efe2792ec010314752473f697eb11 100644 (file)
@@ -39,6 +39,8 @@
 #define SET_PORT_PROMISC_SHIFT 31
 #define SET_PORT_MC_PROMISC_SHIFT      30
 
+#define MLX4_EN_NUM_TC         8
+
 #define VLAN_FLTR_SIZE 128
 struct mlx4_set_vlan_fltr_mbox {
        __be32 entry[VLAN_FLTR_SIZE];
index bcbc54c16947eac20f840f929f5d9ea3a5be3f91..10c24c784b70748ebc6f0eecba52e9461fab86d4 100644 (file)
@@ -39,7 +39,7 @@
 
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
                             int is_tx, int rss, int qpn, int cqn,
-                            struct mlx4_qp_context *context)
+                            int user_prio, struct mlx4_qp_context *context)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
 
@@ -57,6 +57,10 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        context->local_qpn = cpu_to_be32(qpn);
        context->pri_path.ackto = 1 & 0x07;
        context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+       if (user_prio >= 0) {
+               context->pri_path.sched_queue |= user_prio << 3;
+               context->pri_path.feup = 1 << 6;
+       }
        context->pri_path.counter_index = 0xff;
        context->cqn_send = cpu_to_be32(cqn);
        context->cqn_recv = cpu_to_be32(cqn);
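With the new user_prio argument, the sched_queue byte carries both the port and the priority: the 0x83 base, the port number minus one shifted left by 6, and, when a priority is requested, that priority shifted left by 3 (the feup bit is set in a separate field). A quick userspace reconstruction of the field (port and priority values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int port = 2, user_prio = 5;    /* illustrative values */

        /* Same construction as mlx4_en_fill_qp_context() above. */
        uint8_t sched_queue = 0x83 | (port - 1) << 6;
        if (user_prio >= 0)
                sched_queue |= user_prio << 3;

        printf("sched_queue = 0x%02x\n", sched_queue);  /* 0xeb */
        return 0;
}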
index 9adbd53da525ba59b5d9f1378e347b029533f0d7..d49a7ac3187d3c451d67638d48349a0e2d0d55d0 100644 (file)
@@ -823,7 +823,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
 
        memset(context, 0, sizeof *context);
        mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
-                               qpn, ring->cqn, context);
+                               qpn, ring->cqn, -1, context);
        context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
        /* Cancel FCS removal if FW allows */
@@ -890,7 +890,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        }
        rss_map->indir_qp.event = mlx4_en_sqp_event;
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-                               priv->rx_ring[0].cqn, &context);
+                               priv->rx_ring[0].cqn, -1, &context);
 
        if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
                rss_rings = priv->rx_ring_num;
index 17968244c399509b169540388b8b1d971739eef1..d9bab5338c2f809da7f62d8957c7ff68e7374cef 100644 (file)
@@ -156,7 +156,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
-                            int cq)
+                            int cq, int user_prio)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
@@ -174,7 +174,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
        ring->doorbell_qpn = ring->qp.qpn << 8;
 
        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-                               ring->cqn, &ring->context);
+                               ring->cqn, user_prio, &ring->context);
        if (ring->bf_enabled)
                ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
@@ -570,13 +570,9 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-       struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 vlan_tag = 0;
 
-       /* If we support per priority flow control and the packet contains
-        * a vlan tag, send the packet to the TX ring assigned to that priority
-        */
-       if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
        }
index 2a0ff2cc7182e02bdc69373d3cb0fd3cd32ad1ab..cd56f1aea4b505aad5545b515c534dffe6bef1f1 100644 (file)
 #define DRV_VERSION    "1.1"
 #define DRV_RELDATE    "Dec, 2011"
 
+#define MLX4_NUM_UP            8
+#define MLX4_NUM_TC            8
+#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
+#define MLX4_RATELIMIT_DEFAULT 0xffff
+
+struct mlx4_set_port_prio2tc_context {
+       u8 prio2tc[4];
+};
+
+struct mlx4_port_scheduler_tc_cfg_be {
+       __be16 pg;
+       __be16 bw_precentage;
+       __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
+       __be16 max_bw_value;
+};
+
+struct mlx4_set_port_scheduler_context {
+       struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
+};
+
 enum {
        MLX4_HCR_BASE           = 0x80680,
        MLX4_HCR_SIZE           = 0x0001c,
index d69fee41f24aa296106063f06826a06096b07f42..47e1c0ff1775a660d78eba68726a89fce1b66813 100644 (file)
@@ -40,6 +40,9 @@
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#ifdef CONFIG_MLX4_EN_DCB
+#include <linux/dcbnl.h>
+#endif
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -111,6 +114,7 @@ enum {
 #define MLX4_EN_NUM_TX_RINGS           8
 #define MLX4_EN_NUM_PPP_RINGS          8
 #define MAX_TX_RINGS                   (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
+#define MLX4_EN_NUM_UP                 8
 #define MLX4_EN_DEF_TX_RING_SIZE       512
 #define MLX4_EN_DEF_RX_RING_SIZE       1024
 
@@ -411,6 +415,15 @@ struct mlx4_en_frag_info {
 
 };
 
+#ifdef CONFIG_MLX4_EN_DCB
+/* Minimal TC BW - setting to 0 will block traffic */
+#define MLX4_EN_BW_MIN 1
+#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+
+#define MLX4_EN_TC_ETS 7
+
+#endif
+
 struct mlx4_en_priv {
        struct mlx4_en_dev *mdev;
        struct mlx4_en_port_profile *prof;
@@ -484,6 +497,11 @@ struct mlx4_en_priv {
        int vids[128];
        bool wol;
        struct device *ddev;
+
+#ifdef CONFIG_MLX4_EN_DCB
+       struct ieee_ets ets;
+       u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+#endif
 };
 
 enum mlx4_en_wol {
@@ -522,7 +540,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
-                            int cq);
+                            int cq, int user_prio);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring);
 
@@ -540,8 +558,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
                          int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-                            int is_tx, int rss, int qpn, int cqn,
-                            struct mlx4_qp_context *context);
+               int is_tx, int rss, int qpn, int cqn, int user_prio,
+               struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
 int mlx4_en_map_buffer(struct mlx4_buf *buf);
 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
@@ -558,6 +576,10 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
 int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
 
+#ifdef CONFIG_MLX4_EN_DCB
+extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
+#endif
+
 #define MLX4_EN_NUM_SELF_TEST  5
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
 u64 mlx4_en_mac_to_u64(u8 *addr);
index 77535ff18f1b0ae1e33711ed948ad8bf68fe7ddb..55b12e6bed876df8187b80fc210da9ad5879a13a 100644 (file)
@@ -834,6 +834,68 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
 
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_prio2tc_context *context;
+       int err;
+       u32 in_mod;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       for (i = 0; i < MLX4_NUM_UP; i += 2)
+               context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
+
+       in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
+
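
mlx4_SET_PORT_PRIO2TC() above packs two 4-bit priority-to-TC mappings per mailbox byte, even priorities in the high nibble. A small userspace demonstration of the same packing loop (the array contents are made up):

#include <stdint.h>
#include <stdio.h>

#define NUM_UP 8	/* mirrors MLX4_NUM_UP */

int main(void)
{
	uint8_t prio2tc[NUM_UP] = { 0, 0, 1, 1, 2, 2, 3, 3 };	/* example map */
	uint8_t packed[NUM_UP / 2];
	int i;

	/* Even priority -> high nibble, odd priority -> low nibble. */
	for (i = 0; i < NUM_UP; i += 2)
		packed[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	for (i = 0; i < NUM_UP / 2; i++)
		printf("byte %d = 0x%02x\n", i, packed[i]);	/* 00 11 22 33 */
	return 0;
}
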
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+               u8 *pg, u16 *ratelimit)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_scheduler_context *context;
+       int err;
+       u32 in_mod;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       for (i = 0; i < MLX4_NUM_TC; i++) {
+               struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
+               u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
+                       MLX4_RATELIMIT_DEFAULT;
+
+               tc->pg = htons(pg[i]);
+               tc->bw_precentage = htons(tc_tx_bw[i]);
+
+               tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
+               tc->max_bw_value = htons(r);
+       }
+
+       in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
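
mlx4_SET_PORT_SCHEDULER() expects three per-TC arrays: a priority-group id, a bandwidth percentage, and a rate limit expressed in MLX4_RATELIMIT_UNITS. A hypothetical caller that spreads bandwidth evenly and leaves the rate limit at its default could look like the sketch below; the real dcbnl glue is not part of this hunk, so treat the values as illustrative only.

/* Illustrative only: near-even split across 8 TCs, no rate limiting.
 * The "7" priority group mirrors the MLX4_EN_TC_ETS value used by the
 * ethernet driver; other group ids are not covered by this hunk. */
static int example_set_even_ets(struct mlx4_dev *dev, u8 port)
{
	u8 tc_tx_bw[MLX4_NUM_TC];
	u8 pg[MLX4_NUM_TC];
	u16 ratelimit[MLX4_NUM_TC];
	int i;

	for (i = 0; i < MLX4_NUM_TC; i++) {
		tc_tx_bw[i] = 100 / MLX4_NUM_TC + (i < 100 % MLX4_NUM_TC);
		pg[i] = 7;				/* ETS group */
		ratelimit[i] = MLX4_RATELIMIT_DEFAULT;	/* i.e. no limit */
	}

	return mlx4_SET_PORT_SCHEDULER(dev, port, tc_tx_bw, pg, ratelimit);
}
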
index 27273ae1a6e6d0d48d0bb62ef80c5e9de079fcee..90153fc983cbad58cb3a68ae6ff4f9bb5c85762b 100644 (file)
@@ -4033,7 +4033,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netdev->netdev_ops = &myri10ge_netdev_ops;
        netdev->mtu = myri10ge_initial_mtu;
-       netdev->base_addr = mgp->iomem_base;
        netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
        netdev->features = netdev->hw_features;
 
@@ -4047,12 +4046,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->vlan_features &= ~NETIF_F_TSO;
 
        /* make sure we can get an irq, and that MSI can be
-        * setup (if available).  Also ensure netdev->irq
-        * is set to correct value if MSI is enabled */
+        * setup (if available). */
        status = myri10ge_request_irq(mgp);
        if (status != 0)
                goto abort_with_firmware;
-       netdev->irq = pdev->irq;
        myri10ge_free_irq(mgp);
 
        /* Save configuration space to be restored if the
@@ -4077,7 +4074,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        else
                dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
                         mgp->msi_enabled ? "MSI" : "xPIC",
-                        netdev->irq, mgp->tx_boundary, mgp->fw_name,
+                        pdev->irq, mgp->tx_boundary, mgp->fw_name,
                         (mgp->wc_enabled ? "Enabled" : "Disabled"));
 
        board_number++;
index d38e48d4f43057ca8fde7684c5dfb2786c9cadae..5b61d12f8b91127a3439de1ad71ae6d7b27e6e2c 100644 (file)
@@ -547,6 +547,7 @@ struct netdev_private {
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        dma_addr_t tx_dma[TX_RING_SIZE];
        struct net_device *dev;
+       void __iomem *ioaddr;
        struct napi_struct napi;
        /* Media monitoring timer */
        struct timer_list timer;
@@ -699,7 +700,9 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
 
 static inline void __iomem *ns_ioaddr(struct net_device *dev)
 {
-       return (void __iomem *) dev->base_addr;
+       struct netdev_private *np = netdev_priv(dev);
+
+       return np->ioaddr;
 }
 
 static inline void natsemi_irq_enable(struct net_device *dev)
@@ -863,10 +866,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
        /* Store MAC Address in perm_addr */
        memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
 
-       dev->base_addr = (unsigned long __force) ioaddr;
-       dev->irq = irq;
-
        np = netdev_priv(dev);
+       np->ioaddr = ioaddr;
+
        netif_napi_add(dev, &np->napi, natsemi_poll, 64);
        np->dev = dev;
 
@@ -914,9 +916,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
        }
 
        option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
-       if (dev->mem_start)
-               option = dev->mem_start;
-
        /* The lower four bits are the media type. */
        if (option) {
                if (option & 0x200)
@@ -1532,20 +1531,21 @@ static int netdev_open(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
+       const int irq = np->pci_dev->irq;
        int i;
 
        /* Reset the chip, just in case. */
        natsemi_reset(dev);
 
-       i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+       i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (i) return i;
 
        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-                       dev->name, dev->irq);
+                       dev->name, irq);
        i = alloc_ring(dev);
        if (i < 0) {
-               free_irq(dev->irq, dev);
+               free_irq(irq, dev);
                return i;
        }
        napi_enable(&np->napi);
@@ -1794,6 +1794,7 @@ static void netdev_timer(unsigned long data)
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        int next_tick = NATSEMI_TIMER_FREQ;
+       const int irq = np->pci_dev->irq;
 
        if (netif_msg_timer(np)) {
                /* DO NOT read the IntrStatus register,
@@ -1817,14 +1818,14 @@ static void netdev_timer(unsigned long data)
                                if (netif_msg_drv(np))
                                        printk(KERN_NOTICE "%s: possible phy reset: "
                                                "re-initializing\n", dev->name);
-                               disable_irq(dev->irq);
+                               disable_irq(irq);
                                spin_lock_irq(&np->lock);
                                natsemi_stop_rxtx(dev);
                                dump_ring(dev);
                                reinit_ring(dev);
                                init_registers(dev);
                                spin_unlock_irq(&np->lock);
-                               enable_irq(dev->irq);
+                               enable_irq(irq);
                        } else {
                                /* hurry back */
                                next_tick = HZ;
@@ -1841,10 +1842,10 @@ static void netdev_timer(unsigned long data)
                spin_unlock_irq(&np->lock);
        }
        if (np->oom) {
-               disable_irq(dev->irq);
+               disable_irq(irq);
                np->oom = 0;
                refill_rx(dev);
-               enable_irq(dev->irq);
+               enable_irq(irq);
                if (!np->oom) {
                        writel(RxOn, ioaddr + ChipCmd);
                } else {
@@ -1885,8 +1886,9 @@ static void ns_tx_timeout(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
+       const int irq = np->pci_dev->irq;
 
-       disable_irq(dev->irq);
+       disable_irq(irq);
        spin_lock_irq(&np->lock);
        if (!np->hands_off) {
                if (netif_msg_tx_err(np))
@@ -1905,7 +1907,7 @@ static void ns_tx_timeout(struct net_device *dev)
                        dev->name);
        }
        spin_unlock_irq(&np->lock);
-       enable_irq(dev->irq);
+       enable_irq(irq);
 
        dev->trans_start = jiffies; /* prevent tx timeout */
        dev->stats.tx_errors++;
@@ -2470,9 +2472,12 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void natsemi_poll_controller(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       intr_handler(dev->irq, dev);
-       enable_irq(dev->irq);
+       struct netdev_private *np = netdev_priv(dev);
+       const int irq = np->pci_dev->irq;
+
+       disable_irq(irq);
+       intr_handler(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -2523,8 +2528,9 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
        if (netif_running(dev)) {
                struct netdev_private *np = netdev_priv(dev);
                void __iomem * ioaddr = ns_ioaddr(dev);
+               const int irq = np->pci_dev->irq;
 
-               disable_irq(dev->irq);
+               disable_irq(irq);
                spin_lock(&np->lock);
                /* stop engines */
                natsemi_stop_rxtx(dev);
@@ -2537,7 +2543,7 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
                /* restart engines */
                writel(RxOn | TxOn, ioaddr + ChipCmd);
                spin_unlock(&np->lock);
-               enable_irq(dev->irq);
+               enable_irq(irq);
        }
        return 0;
 }
@@ -3135,6 +3141,7 @@ static int netdev_close(struct net_device *dev)
 {
        void __iomem * ioaddr = ns_ioaddr(dev);
        struct netdev_private *np = netdev_priv(dev);
+       const int irq = np->pci_dev->irq;
 
        if (netif_msg_ifdown(np))
                printk(KERN_DEBUG
@@ -3156,14 +3163,14 @@ static int netdev_close(struct net_device *dev)
         */
 
        del_timer_sync(&np->timer);
-       disable_irq(dev->irq);
+       disable_irq(irq);
        spin_lock_irq(&np->lock);
        natsemi_irq_disable(dev);
        np->hands_off = 1;
        spin_unlock_irq(&np->lock);
-       enable_irq(dev->irq);
+       enable_irq(irq);
 
-       free_irq(dev->irq, dev);
+       free_irq(irq, dev);
 
        /* Interrupt disabled, interrupt handler released,
         * queue stopped, timer deleted, rtnl_lock held
@@ -3256,9 +3263,11 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
 
        rtnl_lock();
        if (netif_running (dev)) {
+               const int irq = np->pci_dev->irq;
+
                del_timer_sync(&np->timer);
 
-               disable_irq(dev->irq);
+               disable_irq(irq);
                spin_lock_irq(&np->lock);
 
                natsemi_irq_disable(dev);
@@ -3267,7 +3276,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
                netif_stop_queue(dev);
 
                spin_unlock_irq(&np->lock);
-               enable_irq(dev->irq);
+               enable_irq(irq);
 
                napi_disable(&np->napi);
 
@@ -3307,6 +3316,8 @@ static int natsemi_resume (struct pci_dev *pdev)
        if (netif_device_present(dev))
                goto out;
        if (netif_running(dev)) {
+               const int irq = np->pci_dev->irq;
+
                BUG_ON(!np->hands_off);
                ret = pci_enable_device(pdev);
                if (ret < 0) {
@@ -3320,13 +3331,13 @@ static int natsemi_resume (struct pci_dev *pdev)
 
                natsemi_reset(dev);
                init_ring(dev);
-               disable_irq(dev->irq);
+               disable_irq(irq);
                spin_lock_irq(&np->lock);
                np->hands_off = 0;
                init_registers(dev);
                netif_device_attach(dev);
                spin_unlock_irq(&np->lock);
-               enable_irq(dev->irq);
+               enable_irq(irq);
 
                mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
        }
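
The natsemi changes above (and several drivers further down) all replace reads of the deprecated net_device::irq with the interrupt number taken from the PCI device. The netpoll path ends up with the shape sketched below; the struct and handler names are hypothetical.

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Hypothetical driver: the IRQ now comes from the pci_dev, not dev->irq. */
static void example_poll_controller(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* driver private data */
	const int irq = priv->pci_dev->irq;

	disable_irq(irq);
	example_interrupt(irq, dev);			/* the driver's ISR */
	enable_irq(irq);
}
#endif
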
index 6338ef8606ae3af9133ce53a1188f22230e2a73e..bb367582c1e878ad7d484ccdae9c7ed8bca27e9c 100644 (file)
@@ -2846,6 +2846,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
 static void s2io_netpoll(struct net_device *dev)
 {
        struct s2io_nic *nic = netdev_priv(dev);
+       const int irq = nic->pdev->irq;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
        int i;
@@ -2855,7 +2856,7 @@ static void s2io_netpoll(struct net_device *dev)
        if (pci_channel_offline(nic->pdev))
                return;
 
-       disable_irq(dev->irq);
+       disable_irq(irq);
 
        writeq(val64, &bar0->rx_traffic_int);
        writeq(val64, &bar0->tx_traffic_int);
@@ -2884,7 +2885,7 @@ static void s2io_netpoll(struct net_device *dev)
                        break;
                }
        }
-       enable_irq(dev->irq);
+       enable_irq(irq);
 }
 #endif
 
@@ -3897,9 +3898,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
 
 static void remove_inta_isr(struct s2io_nic *sp)
 {
-       struct net_device *dev = sp->dev;
-
-       free_irq(sp->pdev->irq, dev);
+       free_irq(sp->pdev->irq, sp->dev);
 }
 
 /* ********************************************************* *
@@ -7046,7 +7045,7 @@ static int s2io_add_isr(struct s2io_nic *sp)
                }
        }
        if (sp->config.intr_type == INTA) {
-               err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
+               err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
                                  sp->name, dev);
                if (err) {
                        DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
@@ -7908,9 +7907,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto bar1_remap_failed;
        }
 
-       dev->irq = pdev->irq;
-       dev->base_addr = (unsigned long)sp->bar0;
-
        /* Initializing the BAR1 address as the start of the FIFO pointer. */
        for (j = 0; j < MAX_TX_FIFOS; j++) {
                mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
index ef76725454d250126b18d98998deca4b2fb7212d..51387c31914b4d08508c614a424e307dd58c04d3 100644 (file)
@@ -1882,25 +1882,24 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-       struct __vxge_hw_device *hldev;
-       struct vxgedev *vdev;
-
-       vdev = netdev_priv(dev);
-       hldev = pci_get_drvdata(vdev->pdev);
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct pci_dev *pdev = vdev->pdev;
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+       const int irq = pdev->irq;
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       if (pci_channel_offline(vdev->pdev))
+       if (pci_channel_offline(pdev))
                return;
 
-       disable_irq(dev->irq);
+       disable_irq(irq);
        vxge_hw_device_clear_tx_rx(hldev);
 
        vxge_hw_device_clear_tx_rx(hldev);
        VXGE_COMPLETE_ALL_RX(vdev);
        VXGE_COMPLETE_ALL_TX(vdev);
 
-       enable_irq(dev->irq);
+       enable_irq(irq);
 
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d  Exiting...", __func__, __LINE__);
@@ -2860,12 +2859,12 @@ static int vxge_open(struct net_device *dev)
                vdev->config.rx_pause_enable);
 
        if (vdev->vp_reset_timer.function == NULL)
-               vxge_os_timer(vdev->vp_reset_timer,
-                       vxge_poll_vp_reset, vdev, (HZ/2));
+               vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
+                             HZ / 2);
 
        /* There is no need to check for RxD leak and RxD lookup on Titan1A */
        if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
-               vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+               vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
                              HZ / 2);
 
        set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -3424,9 +3423,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        ndev->features |= ndev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 
-       /*  Driver entry points */
-       ndev->irq = vdev->pdev->irq;
-       ndev->base_addr = (unsigned long) hldev->bar0;
 
        ndev->netdev_ops = &vxge_netdev_ops;
 
index f52a42d1dbb7af241c00039dcb74d5486d2406bf..35f3e7552ec294293115fdcf0be415fa9b2b04d9 100644 (file)
@@ -416,12 +416,15 @@ struct vxge_tx_priv {
        static int p = val; \
        module_param(p, int, 0)
 
-#define vxge_os_timer(timer, handle, arg, exp) do { \
-               init_timer(&timer); \
-               timer.function = handle; \
-               timer.data = (unsigned long) arg; \
-               mod_timer(&timer, (jiffies + exp)); \
-       } while (0);
+static inline
+void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
+                  struct vxgedev *vdev, unsigned long timeout)
+{
+       init_timer(timer);
+       timer->function = func;
+       timer->data = (unsigned long)vdev;
+       mod_timer(timer, jiffies + timeout);
+}
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev);
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
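
Replacing the vxge_os_timer() macro with a static inline also quietly removes a latent hazard: the old definition ended in `while (0);`, and that trailing semicolon breaks any if/else caller. A compiler-level illustration (not driver code):

/* The stray semicolon baked into BAD_INIT orphans the "else" below, because
 * the if-body becomes "do { ... } while (0);" followed by an extra empty
 * statement; GOOD_INIT leaves the caller to supply the semicolon. */
#define BAD_INIT(x)	do { (x) = 0; } while (0);
#define GOOD_INIT(x)	do { (x) = 0; } while (0)

static int demo(int flag)
{
	int v = 1;

	if (flag)
		GOOD_INIT(v);	/* swapping in BAD_INIT here fails to compile */
	else
		v = 2;

	return v;
}
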
index aca13046e4326a1fd2f6be0864dac06749e2647b..d93a088debc3cb0bf61ea3e7b5683e2924f1f167 100644 (file)
@@ -3942,13 +3942,11 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                ret = pci_enable_msi(np->pci_dev);
                if (ret == 0) {
                        np->msi_flags |= NV_MSI_ENABLED;
-                       dev->irq = np->pci_dev->irq;
                        if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
                                netdev_info(dev, "request_irq failed %d\n",
                                            ret);
                                pci_disable_msi(np->pci_dev);
                                np->msi_flags &= ~NV_MSI_ENABLED;
-                               dev->irq = np->pci_dev->irq;
                                goto out_err;
                        }
 
@@ -5649,9 +5647,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        np->base = ioremap(addr, np->register_size);
        if (!np->base)
                goto out_relreg;
-       dev->base_addr = (unsigned long)np->base;
-
-       dev->irq = pci_dev->irq;
 
        np->rx_ring_size = RX_RING_DEFAULT;
        np->tx_ring_size = TX_RING_DEFAULT;
index 6dfc26d85e4747bb645d577da44d8840a998405d..d3469d8e3f0d318dc7b9687df50fd8d8222a6d38 100644 (file)
@@ -990,10 +990,10 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
                        ndev->stats.rx_errors++;
                } else {
                        /* Packet is good */
-                       skb = dev_alloc_skb(len + 8);
-                       if (!skb)
+                       skb = dev_alloc_skb(len);
+                       if (!skb) {
                                ndev->stats.rx_dropped++;
-                       else {
+                       } else {
                                prdbuf = skb_put(skb, len);
 
                                /* Copy packet from buffer */
index 0d29f5f4b8e41083a9114d5b2e43b7274bc1d04d..c2367158350ec8680da96b1ff8ecfd1099debd18 100644 (file)
@@ -683,8 +683,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
        }
 
        hmp->base = ioaddr;
-       dev->base_addr = (unsigned long)ioaddr;
-       dev->irq = irq;
        pci_set_drvdata(pdev, dev);
 
        hmp->chip_id = chip_id;
@@ -859,14 +857,11 @@ static int hamachi_open(struct net_device *dev)
        u32 rx_int_var, tx_int_var;
        u16 fifo_info;
 
-       i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
+       i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
+                       dev->name, dev);
        if (i)
                return i;
 
-       if (hamachi_debug > 1)
-               printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
-                          dev->name, dev->irq);
-
        hamachi_init_ring(dev);
 
 #if ADDRLEN == 64
@@ -1705,7 +1700,7 @@ static int hamachi_close(struct net_device *dev)
        }
 #endif /* __i386__ debugging only */
 
-       free_irq(dev->irq, dev);
+       free_irq(hmp->pci_dev->irq, dev);
 
        del_timer_sync(&hmp->timer);
 
index 7757b80ef924b8000c2b7e13066c0961566a38ee..04e622fd468d89acdaa758b4492434730fea0e0c 100644 (file)
@@ -427,9 +427,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);
 
-       dev->base_addr = (unsigned long)ioaddr;
-       dev->irq = irq;
-
        pci_set_drvdata(pdev, dev);
        spin_lock_init(&np->lock);
 
@@ -569,25 +566,20 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
 static int yellowfin_open(struct net_device *dev)
 {
        struct yellowfin_private *yp = netdev_priv(dev);
+       const int irq = yp->pci_dev->irq;
        void __iomem *ioaddr = yp->base;
-       int i, ret;
+       int i, rc;
 
        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);
 
-       ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-       if (ret)
-               return ret;
-
-       if (yellowfin_debug > 1)
-               netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
-                             __func__, dev->irq);
+       rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+       if (rc)
+               return rc;
 
-       ret = yellowfin_init_ring(dev);
-       if (ret) {
-               free_irq(dev->irq, dev);
-               return ret;
-       }
+       rc = yellowfin_init_ring(dev);
+       if (rc < 0)
+               goto err_free_irq;
 
        iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
        iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -647,8 +639,12 @@ static int yellowfin_open(struct net_device *dev)
        yp->timer.data = (unsigned long)dev;
        yp->timer.function = yellowfin_timer;                           /* timer handler */
        add_timer(&yp->timer);
+out:
+       return rc;
 
-       return 0;
+err_free_irq:
+       free_irq(irq, dev);
+       goto out;
 }
 
 static void yellowfin_timer(unsigned long data)
@@ -1251,7 +1247,7 @@ static int yellowfin_close(struct net_device *dev)
        }
 #endif /* __i386__ debugging only */
 
-       free_irq(dev->irq, dev);
+       free_irq(yp->pci_dev->irq, dev);
 
        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
index b96e1920e0459ac7ca12d5d81037b3d66e88e878..4de73643fec676396835c3c7582c287d8648b322 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
  * Copyright (C) 2007
  *     Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
- *     Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 #define MT_ICR         0x0C    /* TX interrupt control */
 #define MR_ICR         0x10    /* RX interrupt control */
 #define MTPR           0x14    /* TX poll command register */
+#define  TM2TX         0x0001  /* Trigger MAC to transmit */
 #define MR_BSR         0x18    /* RX buffer size */
 #define MR_DCR         0x1A    /* RX descriptor control */
 #define MLSR           0x1C    /* Last status */
+#define  TX_FIFO_UNDR  0x0200  /* TX FIFO under-run */
+#define         TX_EXCEEDC     0x2000  /* Transmit exceed collision */
+#define  TX_LATEC      0x4000  /* Transmit late collision */
 #define MMDIO          0x20    /* MDIO control register */
 #define  MDIO_WRITE    0x4000  /* MDIO write */
 #define  MDIO_READ     0x2000  /* MDIO read */
 #define MID_3M         0x82    /* MID3 Medium */
 #define MID_3H         0x84    /* MID3 High */
 #define PHY_CC         0x88    /* PHY status change configuration register */
+#define  SCEN          0x8000  /* PHY status change enable */
+#define  PHYAD_SHIFT   8       /* PHY address shift */
+#define  TMRDIV_SHIFT  0       /* Timer divider shift */
 #define PHY_ST         0x8A    /* PHY status register */
 #define MAC_SM         0xAC    /* MAC status machine */
 #define  MAC_SM_RST    0x0002  /* MAC status machine reset */
 #define MBCR_DEFAULT   0x012A  /* MAC Bus Control Register */
 #define MCAST_MAX      3       /* Max number multicast addresses to filter */
 
+#define MAC_DEF_TIMEOUT        2048    /* Default MAC read/write operation timeout */
+
 /* Descriptor status */
 #define DSC_OWNER_MAC  0x8000  /* MAC is the owner of this descriptor */
 #define DSC_RX_OK      0x4000  /* RX was successful */
@@ -187,7 +196,7 @@ struct r6040_private {
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        u16     tx_free_desc;
-       u16     mcr0, mcr1;
+       u16     mcr0;
        struct net_device *dev;
        struct mii_bus *mii_bus;
        struct napi_struct napi;
@@ -204,7 +213,7 @@ static char version[] __devinitdata = DRV_NAME
 /* Read a word data from PHY Chip */
 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 {
-       int limit = 2048;
+       int limit = MAC_DEF_TIMEOUT;
        u16 cmd;
 
        iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
@@ -222,7 +231,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 static void r6040_phy_write(void __iomem *ioaddr,
                                        int phy_addr, int reg, u16 val)
 {
-       int limit = 2048;
+       int limit = MAC_DEF_TIMEOUT;
        u16 cmd;
 
        iowrite16(val, ioaddr + MMWD);
@@ -358,27 +367,35 @@ err_exit:
        return rc;
 }
 
-static void r6040_init_mac_regs(struct net_device *dev)
+static void r6040_reset_mac(struct r6040_private *lp)
 {
-       struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
-       int limit = 2048;
+       int limit = MAC_DEF_TIMEOUT;
        u16 cmd;
 
-       /* Mask Off Interrupt */
-       iowrite16(MSK_INT, ioaddr + MIER);
-
-       /* Reset RDC MAC */
        iowrite16(MAC_RST, ioaddr + MCR1);
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
                if (cmd & MAC_RST)
                        break;
        }
+
        /* Reset internal state machine */
        iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);
+}
+
+static void r6040_init_mac_regs(struct net_device *dev)
+{
+       struct r6040_private *lp = netdev_priv(dev);
+       void __iomem *ioaddr = lp->base;
+
+       /* Mask Off Interrupt */
+       iowrite16(MSK_INT, ioaddr + MIER);
+
+       /* Reset RDC MAC */
+       r6040_reset_mac(lp);
 
        /* MAC Bus Control Register */
        iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -407,7 +424,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
        /* Let TX poll the descriptors
         * we may got called by r6040_tx_timeout which has left
         * some unsent tx buffers */
-       iowrite16(0x01, ioaddr + MTPR);
+       iowrite16(TM2TX, ioaddr + MTPR);
 }
 
 static void r6040_tx_timeout(struct net_device *dev)
@@ -445,18 +462,13 @@ static void r6040_down(struct net_device *dev)
 {
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
-       int limit = 2048;
        u16 *adrp;
-       u16 cmd;
 
        /* Stop MAC */
        iowrite16(MSK_INT, ioaddr + MIER);      /* Mask Off Interrupt */
-       iowrite16(MAC_RST, ioaddr + MCR1);      /* Reset RDC MAC */
-       while (limit--) {
-               cmd = ioread16(ioaddr + MCR1);
-               if (cmd & MAC_RST)
-                       break;
-       }
+
+       /* Reset RDC MAC */
+       r6040_reset_mac(lp);
 
        /* Restore MAC Address to MIDx */
        adrp = (u16 *) dev->dev_addr;
@@ -599,9 +611,9 @@ static void r6040_tx(struct net_device *dev)
                /* Check for errors */
                err = ioread16(ioaddr + MLSR);
 
-               if (err & 0x0200)
-                       dev->stats.rx_fifo_errors++;
-               if (err & (0x2000 | 0x4000))
+               if (err & TX_FIFO_UNDR)
+                       dev->stats.tx_fifo_errors++;
+               if (err & (TX_EXCEEDC | TX_LATEC))
                        dev->stats.tx_carrier_errors++;
 
                if (descptr->status & DSC_OWNER_MAC)
@@ -736,11 +748,7 @@ static void r6040_mac_address(struct net_device *dev)
        u16 *adrp;
 
        /* Reset MAC */
-       iowrite16(MAC_RST, ioaddr + MCR1);
-       /* Reset internal state machine */
-       iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
-       iowrite16(0, ioaddr + MAC_SM);
-       mdelay(5);
+       r6040_reset_mac(lp);
 
        /* Restore MAC Address */
        adrp = (u16 *) dev->dev_addr;
@@ -840,7 +848,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
        skb_tx_timestamp(skb);
 
        /* Trigger the MAC to check the TX descriptor */
-       iowrite16(0x01, ioaddr + MTPR);
+       iowrite16(TM2TX, ioaddr + MTPR);
        lp->tx_insert_ptr = descptr->vndescp;
 
        /* If no tx resource, stop */
@@ -973,6 +981,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
        .get_settings           = netdev_get_settings,
        .set_settings           = netdev_set_settings,
        .get_link               = ethtool_op_get_link,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops r6040_netdev_ops = {
@@ -1126,10 +1135,15 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
                err = -EIO;
                goto err_out_free_res;
        }
+
        /* If PHY status change register is still set to zero it means the
-        * bootloader didn't initialize it */
+        * bootloader didn't initialize it, so we set it to:
+        * - enable phy status change
+        * - enable all phy addresses
+        * - set to lowest timer divider */
        if (ioread16(ioaddr + PHY_CC) == 0)
-               iowrite16(0x9f07, ioaddr + PHY_CC);
+               iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
+                               7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
 
        /* Init system & device */
        lp->base = ioaddr;
index abc79076f867baa5220361e0d6810d11e7edc8ef..69c7d695807c1c00750e25a263b3da79668e22ba 100644 (file)
@@ -635,9 +635,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
  */
 static void cp_poll_controller(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       cp_interrupt(dev->irq, dev);
-       enable_irq(dev->irq);
+       struct cp_private *cp = netdev_priv(dev);
+       const int irq = cp->pdev->irq;
+
+       disable_irq(irq);
+       cp_interrupt(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -1114,6 +1117,7 @@ static void cp_free_rings (struct cp_private *cp)
 static int cp_open (struct net_device *dev)
 {
        struct cp_private *cp = netdev_priv(dev);
+       const int irq = cp->pdev->irq;
        int rc;
 
        netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1126,7 +1130,7 @@ static int cp_open (struct net_device *dev)
 
        cp_init_hw(cp);
 
-       rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
+       rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;
 
@@ -1161,7 +1165,7 @@ static int cp_close (struct net_device *dev)
 
        spin_unlock_irqrestore(&cp->lock, flags);
 
-       free_irq(dev->irq, dev);
+       free_irq(cp->pdev->irq, dev);
 
        cp_free_rings(cp);
        return 0;
@@ -1909,7 +1913,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
                       (unsigned long long)pciaddr);
                goto err_out_res;
        }
-       dev->base_addr = (unsigned long) regs;
        cp->regs = regs;
 
        cp_stop_hw(cp);
@@ -1937,14 +1940,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
 
-       dev->irq = pdev->irq;
-
        rc = register_netdev(dev);
        if (rc)
                goto err_out_iomap;
 
-       netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
-                   dev->base_addr, dev->dev_addr, dev->irq);
+       netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
+                   regs, dev->dev_addr, pdev->irq);
 
        pci_set_drvdata(pdev, dev);
 
index df7fd8d083dc1a133805f1b54ca47b4e0bb4c44f..03df076ed596086abbfd1dde5383badc4614ab8e 100644 (file)
@@ -148,9 +148,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
 /* Whether to use MMIO or PIO. Default to MMIO. */
 #ifdef CONFIG_8139TOO_PIO
-static int use_io = 1;
+static bool use_io = true;
 #else
-static int use_io = 0;
+static bool use_io = false;
 #endif
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -620,7 +620,7 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-module_param(use_io, int, 0);
+module_param(use_io, bool, 0);
 MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
 module_param(multicast_filter_limit, int, 0);
 module_param_array(media, int, NULL, 0);
@@ -750,15 +750,22 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
 
 static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
 {
+       struct device *d = &pdev->dev;
        void __iomem *ioaddr;
        struct net_device *dev;
        struct rtl8139_private *tp;
        u8 tmp8;
        int rc, disable_dev_on_err = 0;
-       unsigned int i;
-       unsigned long pio_start, pio_end, pio_flags, pio_len;
-       unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+       unsigned int i, bar;
+       unsigned long io_len;
        u32 version;
+       static const struct {
+               unsigned long mask;
+               char *type;
+       } res[] = {
+               { IORESOURCE_IO,  "PIO" },
+               { IORESOURCE_MEM, "MMIO" }
+       };
 
        assert (pdev != NULL);
 
@@ -777,78 +784,45 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
        if (rc)
                goto err_out;
 
-       pio_start = pci_resource_start (pdev, 0);
-       pio_end = pci_resource_end (pdev, 0);
-       pio_flags = pci_resource_flags (pdev, 0);
-       pio_len = pci_resource_len (pdev, 0);
-
-       mmio_start = pci_resource_start (pdev, 1);
-       mmio_end = pci_resource_end (pdev, 1);
-       mmio_flags = pci_resource_flags (pdev, 1);
-       mmio_len = pci_resource_len (pdev, 1);
-
-       /* set this immediately, we need to know before
-        * we talk to the chip directly */
-       pr_debug("PIO region size == 0x%02lX\n", pio_len);
-       pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
-
-retry:
-       if (use_io) {
-               /* make sure PCI base addr 0 is PIO */
-               if (!(pio_flags & IORESOURCE_IO)) {
-                       dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
-                       rc = -ENODEV;
-                       goto err_out;
-               }
-               /* check for weird/broken PCI region reporting */
-               if (pio_len < RTL_MIN_IO_SIZE) {
-                       dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
-                       rc = -ENODEV;
-                       goto err_out;
-               }
-       } else {
-               /* make sure PCI base addr 1 is MMIO */
-               if (!(mmio_flags & IORESOURCE_MEM)) {
-                       dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
-                       rc = -ENODEV;
-                       goto err_out;
-               }
-               if (mmio_len < RTL_MIN_IO_SIZE) {
-                       dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
-                       rc = -ENODEV;
-                       goto err_out;
-               }
-       }
-
        rc = pci_request_regions (pdev, DRV_NAME);
        if (rc)
                goto err_out;
        disable_dev_on_err = 1;
 
-       /* enable PCI bus-mastering */
        pci_set_master (pdev);
 
-       if (use_io) {
-               ioaddr = pci_iomap(pdev, 0, 0);
-               if (!ioaddr) {
-                       dev_err(&pdev->dev, "cannot map PIO, aborting\n");
-                       rc = -EIO;
-                       goto err_out;
-               }
-               dev->base_addr = pio_start;
-               tp->regs_len = pio_len;
-       } else {
-               /* ioremap MMIO region */
-               ioaddr = pci_iomap(pdev, 1, 0);
-               if (ioaddr == NULL) {
-                       dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
-                       pci_release_regions(pdev);
-                       use_io = 1;
+retry:
+       /* PIO bar register comes first. */
+       bar = !use_io;
+
+       io_len = pci_resource_len(pdev, bar);
+
+       dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
+
+       if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
+               dev_err(d, "region #%d not a %s resource, aborting\n", bar,
+                       res[bar].type);
+               rc = -ENODEV;
+               goto err_out;
+       }
+       if (io_len < RTL_MIN_IO_SIZE) {
+               dev_err(d, "Invalid PCI %s region size(s), aborting\n",
+                       res[bar].type);
+               rc = -ENODEV;
+               goto err_out;
+       }
+
+       ioaddr = pci_iomap(pdev, bar, 0);
+       if (!ioaddr) {
+               dev_err(d, "cannot map %s\n", res[bar].type);
+               if (!use_io) {
+                       use_io = true;
                        goto retry;
                }
-               dev->base_addr = (long) ioaddr;
-               tp->regs_len = mmio_len;
+               rc = -ENODEV;
+               goto err_out;
        }
+       tp->regs_len = io_len;
        tp->mmio_addr = ioaddr;
 
        /* Bring old chips out of low-power mode. */
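
The rework above folds the duplicated PIO/MMIO validation into one table-indexed path keyed by `bar = !use_io` (BAR 0 is PIO, BAR 1 is MMIO on this chip), with a one-shot fallback from MMIO to PIO when the mapping fails. A condensed, driver-agnostic sketch of the same idiom (the helper name and minimum-size parameter are illustrative):

/* Map either the PIO BAR (0) or the MMIO BAR (1), falling back to PIO once
 * if the MMIO mapping fails; returns NULL if neither resource is usable. */
static void __iomem *example_map_bar(struct pci_dev *pdev, bool *use_io,
				     unsigned long min_len)
{
	void __iomem *ioaddr;
	unsigned int bar;

retry:
	bar = !*use_io;		/* 0 = PIO, 1 = MMIO */
	if (!(pci_resource_flags(pdev, bar) &
	      (*use_io ? IORESOURCE_IO : IORESOURCE_MEM)))
		return NULL;
	if (pci_resource_len(pdev, bar) < min_len)
		return NULL;

	ioaddr = pci_iomap(pdev, bar, 0);
	if (!ioaddr && !*use_io) {
		*use_io = true;	/* MMIO failed: try the PIO BAR instead */
		goto retry;
	}
	return ioaddr;
}
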
@@ -1035,8 +1009,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
        dev->hw_features |= NETIF_F_RXALL;
        dev->hw_features |= NETIF_F_RXFCS;
 
-       dev->irq = pdev->irq;
-
        /* tp zeroed and aligned in alloc_etherdev */
        tp = netdev_priv(dev);
 
@@ -1062,9 +1034,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 
        pci_set_drvdata (pdev, dev);
 
-       netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+       netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
                    board_info[ent->driver_data].name,
-                   dev->base_addr, dev->dev_addr, dev->irq);
+                   ioaddr, dev->dev_addr, pdev->irq);
 
        netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
                   rtl_chip_info[tp->chipset].name);
@@ -1339,10 +1311,11 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
 static int rtl8139_open (struct net_device *dev)
 {
        struct rtl8139_private *tp = netdev_priv(dev);
-       int retval;
        void __iomem *ioaddr = tp->mmio_addr;
+       const int irq = tp->pci_dev->irq;
+       int retval;
 
-       retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
+       retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;
 
@@ -1351,7 +1324,7 @@ static int rtl8139_open (struct net_device *dev)
        tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
                                           &tp->rx_ring_dma, GFP_KERNEL);
        if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
-               free_irq(dev->irq, dev);
+               free_irq(irq, dev);
 
                if (tp->tx_bufs)
                        dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
@@ -1377,7 +1350,7 @@ static int rtl8139_open (struct net_device *dev)
                  "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
                  __func__,
                  (unsigned long long)pci_resource_start (tp->pci_dev, 1),
-                 dev->irq, RTL_R8 (MediaStatus),
+                 irq, RTL_R8 (MediaStatus),
                  tp->mii.full_duplex ? "full" : "half");
 
        rtl8139_start_thread(tp);
@@ -2240,9 +2213,12 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
  */
 static void rtl8139_poll_controller(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       rtl8139_interrupt(dev->irq, dev);
-       enable_irq(dev->irq);
+       struct rtl8139_private *tp = netdev_priv(dev);
+       const int irq = tp->pci_dev->irq;
+
+       disable_irq(irq);
+       rtl8139_interrupt(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -2295,7 +2271,7 @@ static int rtl8139_close (struct net_device *dev)
 
        spin_unlock_irqrestore (&tp->lock, flags);
 
-       free_irq (dev->irq, dev);
+       free_irq(tp->pci_dev->irq, dev);
 
        rtl8139_tx_clear (tp);
 
index f54509377efad8354345176fdb9c1951a9fdd4de..71393ea8ef51162586b344666bcf8b47937b5fe9 100644 (file)
@@ -1853,6 +1853,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_strings            = rtl8169_get_strings,
        .get_sset_count         = rtl8169_get_sset_count,
        .get_ethtool_stats      = rtl8169_get_ethtool_stats,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
index 3fb2355af37e5bebcbbd7e6fa706ee120056bf8b..46df3a04030c20af3a3288d7a27423b095694dfc 100644 (file)
@@ -4,11 +4,11 @@
 
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
-       depends on SUPERH && \
+       depends on (SUPERH || ARCH_SHMOBILE) && \
                (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
                 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
                 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
-                CPU_SUBTYPE_SH7757)
+                CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
        select CRC32
        select NET_CORE
        select MII
@@ -17,4 +17,5 @@ config SH_ETH
        ---help---
          Renesas SuperH Ethernet device driver.
          This driver supporting CPUs are:
-               - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757.
+               - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
+                 and R8A7740.
index d63e09b29a961b8cfc809a23b04821b8d56e7a8f..be3c22179161504f39eb2527b85cfa44eb2da685 100644 (file)
@@ -386,6 +386,114 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
                sh_eth_write(ndev, 0x0, CSMR);
 }
 
+#elif defined(CONFIG_ARCH_R8A7740)
+#define SH_ETH_HAS_TSU 1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long mii;
+
+       /* reset device */
+       sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+       mdelay(1);
+
+       switch (mdp->phy_interface) {
+       case PHY_INTERFACE_MODE_GMII:
+               mii = 2;
+               break;
+       case PHY_INTERFACE_MODE_MII:
+               mii = 1;
+               break;
+       case PHY_INTERFACE_MODE_RMII:
+       default:
+               mii = 0;
+               break;
+       }
+       sh_eth_write(ndev, mii, RMII_MII);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+       int cnt = 100;
+
+       sh_eth_write(ndev, EDSR_ENALL, EDSR);
+       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+       while (cnt > 0) {
+               if (!(sh_eth_read(ndev, EDMR) & 0x3))
+                       break;
+               mdelay(1);
+               cnt--;
+       }
+       if (cnt == 0)
+               printk(KERN_ERR "Device reset fail\n");
+
+       /* Table Init */
+       sh_eth_write(ndev, 0x0, TDLAR);
+       sh_eth_write(ndev, 0x0, TDFAR);
+       sh_eth_write(ndev, 0x0, TDFXR);
+       sh_eth_write(ndev, 0x0, TDFFR);
+       sh_eth_write(ndev, 0x0, RDLAR);
+       sh_eth_write(ndev, 0x0, RDFAR);
+       sh_eth_write(ndev, 0x0, RDFXR);
+       sh_eth_write(ndev, 0x0, RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       if (mdp->duplex) /* Full */
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+       else            /* Half */
+               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       switch (mdp->speed) {
+       case 10: /* 10BASE */
+               sh_eth_write(ndev, GECMR_10, GECMR);
+               break;
+       case 100:/* 100BASE */
+               sh_eth_write(ndev, GECMR_100, GECMR);
+               break;
+       case 1000: /* 1000BASE */
+               sh_eth_write(ndev, GECMR_1000, GECMR);
+               break;
+       default:
+               break;
+       }
+}
+
+/* R8A7740 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+       .chip_reset     = sh_eth_chip_reset,
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate,
+
+       .ecsr_value     = ECSR_ICD | ECSR_MPD,
+       .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+       .tx_check       = EESR_TC1 | EESR_FTC,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+                         EESR_ECI,
+       .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+                         EESR_TFE,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .bculr          = 1,
+       .hw_swap        = 1,
+       .no_trimd       = 1,
+       .no_ade         = 1,
+       .tsu            = 1,
+};
+
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 #define SH_ETH_RESET_DEFAULT   1
 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -443,7 +551,7 @@ static void sh_eth_reset(struct net_device *ndev)
 }
 #endif
 
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
        int reserve;
@@ -919,6 +1027,10 @@ static int sh_eth_rx(struct net_device *ndev)
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;
 
+#if defined(CONFIG_ARCH_R8A7740)
+               desc_status >>= 16;
+#endif
+
                if (--boguscnt < 0)
                        break;
 
index 0fa14afce23d47bb95fff404ca198339d3a4b891..57b8e1fc5d15a088655d69783ffb293d2bf9ac7b 100644 (file)
@@ -372,7 +372,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 /* Driver's parameters */
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 #define SH4_SKB_RX_ALIGN       32
 #else
 #define SH2_SH3_SKB_RX_ALIGN   2
@@ -381,7 +381,8 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 /*
  * Register's bits
  */
-#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
+    defined(CONFIG_ARCH_R8A7740)
 /* EDSR */
 enum EDSR_BIT {
        EDSR_ENT = 0x01, EDSR_ENR = 0x02,
index a284d6440538309cc801a9d87685af2037cc7c5b..32e55664df6e3d0dc7dd384fb763beb74cccb4ae 100644 (file)
@@ -39,9 +39,7 @@
 #define SC92031_NAME "sc92031"
 
 /* BAR 0 is MMIO, BAR 1 is PIO */
-#ifndef SC92031_USE_BAR
-#define SC92031_USE_BAR 0
-#endif
+#define SC92031_USE_PIO        0
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
 static int multicast_filter_limit = 64;
@@ -366,7 +364,7 @@ static void sc92031_disable_interrupts(struct net_device *dev)
        mmiowb();
 
        /* wait for any concurrent interrupt/tasklet to finish */
-       synchronize_irq(dev->irq);
+       synchronize_irq(priv->pdev->irq);
        tasklet_disable(&priv->tasklet);
 }
 
@@ -1114,10 +1112,13 @@ static void sc92031_tx_timeout(struct net_device *dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void sc92031_poll_controller(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
+       struct sc92031_priv *priv = netdev_priv(dev);
+       const int irq = priv->pdev->irq;
+
+       disable_irq(irq);
+       if (sc92031_interrupt(irq, dev) != IRQ_NONE)
                sc92031_tasklet((unsigned long)dev);
-       enable_irq(dev->irq);
+       enable_irq(irq);
 }
 #endif
 
@@ -1402,7 +1403,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
        struct net_device *dev;
        struct sc92031_priv *priv;
        u32 mac0, mac1;
-       unsigned long base_addr;
 
        err = pci_enable_device(pdev);
        if (unlikely(err < 0))
@@ -1422,7 +1422,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
        if (unlikely(err < 0))
                goto out_request_regions;
 
-       port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
+       port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
        if (unlikely(!port_base)) {
                err = -EIO;
                goto out_iomap;
@@ -1437,14 +1437,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-#if SC92031_USE_BAR == 0
-       dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
-       dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
-#elif SC92031_USE_BAR == 1
-       dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
-#endif
-       dev->irq = pdev->irq;
-
        /* faked with skb_copy_and_csum_dev */
        dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -1478,13 +1470,9 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
        if (err < 0)
                goto out_register_netdev;
 
-#if SC92031_USE_BAR == 0
-       base_addr = dev->mem_start;
-#elif SC92031_USE_BAR == 1
-       base_addr = dev->base_addr;
-#endif
        printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
-                       base_addr, dev->dev_addr, dev->irq);
+              (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
+              pdev->irq);
 
        return 0;
 
index a9deda8eaf631998bf599f83832fe3e2eb0d1bee..4613591b43e74ebc34f774dba399802e459f874b 100644 (file)
@@ -729,7 +729,7 @@ static void sis190_tx_interrupt(struct net_device *dev,
  * The interrupt handler does all of the Rx thread work and cleans up after
  * the Tx thread.
  */
-static irqreturn_t sis190_interrupt(int irq, void *__dev)
+static irqreturn_t sis190_irq(int irq, void *__dev)
 {
        struct net_device *dev = __dev;
        struct sis190_private *tp = netdev_priv(dev);
@@ -772,11 +772,11 @@ out:
 static void sis190_netpoll(struct net_device *dev)
 {
        struct sis190_private *tp = netdev_priv(dev);
-       struct pci_dev *pdev = tp->pci_dev;
+       const int irq = tp->pci_dev->irq;
 
-       disable_irq(pdev->irq);
-       sis190_interrupt(pdev->irq, dev);
-       enable_irq(pdev->irq);
+       disable_irq(irq);
+       sis190_irq(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -1085,7 +1085,7 @@ static int sis190_open(struct net_device *dev)
 
        sis190_request_timer(dev);
 
-       rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
+       rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
        if (rc < 0)
                goto err_release_timer_2;
 
@@ -1097,11 +1097,9 @@ err_release_timer_2:
        sis190_delete_timer(dev);
        sis190_rx_clear(tp);
 err_free_rx_1:
-       pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
-               tp->rx_dma);
+       pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
 err_free_tx_0:
-       pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
-               tp->tx_dma);
+       pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
        goto out;
 }
 
@@ -1141,7 +1139,7 @@ static void sis190_down(struct net_device *dev)
 
                spin_unlock_irq(&tp->lock);
 
-               synchronize_irq(dev->irq);
+               synchronize_irq(tp->pci_dev->irq);
 
                if (!poll_locked)
                        poll_locked++;
@@ -1161,7 +1159,7 @@ static int sis190_close(struct net_device *dev)
 
        sis190_down(dev);
 
-       free_irq(dev->irq, dev);
+       free_irq(pdev->irq, dev);
 
        pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
        pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
@@ -1884,8 +1882,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
        dev->netdev_ops = &sis190_netdev_ops;
 
        SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
-       dev->irq = pdev->irq;
-       dev->base_addr = (unsigned long) 0xdead;
        dev->watchdog_timeo = SIS190_TX_TIMEOUT;
 
        spin_lock_init(&tp->lock);
@@ -1902,7 +1898,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
                netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
                            pci_name(pdev),
                            sis_chip_info[ent->driver_data].name,
-                           ioaddr, dev->irq, dev->dev_addr);
+                           ioaddr, pdev->irq, dev->dev_addr);
                netdev_info(dev, "%s mode.\n",
                            (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
        }
index 5ccf02e7e3ad069e74578bb5760c02cceb4ecc49..203d9c6ec23a109cd9f941d2709257a2058930a4 100644 (file)
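The sis900 conversion below stores the pci_iomap() cookie in the driver-private structure and routes every register access through small sw32()/sr32() macros that expand against a local ioaddr variable. A minimal sketch of that idiom (the macros match the ones the patch adds; the foo_* names and register offset are placeholders):

    #include <linux/io.h>
    #include <linux/pci.h>

    #define sw32(reg, val)  iowrite32(val, ioaddr + (reg))
    #define sr32(reg)       ioread32(ioaddr + (reg))

    struct foo_private {                        /* hypothetical private data */
            void __iomem *ioaddr;               /* cookie returned by pci_iomap() */
    };

    enum { foo_cr = 0x00 };                     /* hypothetical register offset */

    static void foo_enable_rx(struct foo_private *sp)
    {
            /* each helper pulls ioaddr into a local so the macros resolve */
            void __iomem *ioaddr = sp->ioaddr;

            sw32(foo_cr, 0x1 | sr32(foo_cr));   /* read-modify-write a control register */
    }

Because ioread32()/iowrite32() accept pci_iomap() cookies for both I/O-port and memory BARs, the same helpers serve either resource type.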
@@ -168,6 +168,8 @@ struct sis900_private {
        unsigned int cur_phy;
        struct mii_if_info mii_info;
 
+       void __iomem    *ioaddr;
+
        struct timer_list timer; /* Link status detection timer. */
        u8 autong_complete; /* 1: auto-negotiate complete  */
 
@@ -201,13 +203,18 @@ MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtere
 MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
 MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
 
+#define sw32(reg, val) iowrite32(val, ioaddr + (reg))
+#define sw8(reg, val)  iowrite8(val, ioaddr + (reg))
+#define sr32(reg)      ioread32(ioaddr + (reg))
+#define sr16(reg)      ioread16(ioaddr + (reg))
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void sis900_poll(struct net_device *dev);
 #endif
 static int sis900_open(struct net_device *net_dev);
 static int sis900_mii_probe (struct net_device * net_dev);
 static void sis900_init_rxfilter (struct net_device * net_dev);
-static u16 read_eeprom(long ioaddr, int location);
+static u16 read_eeprom(void __iomem *ioaddr, int location);
 static int mdio_read(struct net_device *net_dev, int phy_id, int location);
 static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
 static void sis900_timer(unsigned long data);
@@ -231,7 +238,7 @@ static u16 sis900_default_phy(struct net_device * net_dev);
 static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
 static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
 static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
-static void sis900_set_mode (long ioaddr, int speed, int duplex);
+static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
 static const struct ethtool_ops sis900_ethtool_ops;
 
 /**
@@ -246,7 +253,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
 
 static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
 {
-       long ioaddr = pci_resource_start(pci_dev, 0);
+       struct sis900_private *sis_priv = netdev_priv(net_dev);
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u16 signature;
        int i;
 
@@ -325,29 +333,30 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
 static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
                                        struct net_device *net_dev)
 {
-       long ioaddr = net_dev->base_addr;
+       struct sis900_private *sis_priv = netdev_priv(net_dev);
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u32 rfcrSave;
        u32 i;
 
-       rfcrSave = inl(rfcr + ioaddr);
+       rfcrSave = sr32(rfcr);
 
-       outl(rfcrSave | RELOAD, ioaddr + cr);
-       outl(0, ioaddr + cr);
+       sw32(cr, rfcrSave | RELOAD);
+       sw32(cr, 0);
 
        /* disable packet filtering before setting filter */
-       outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+       sw32(rfcr, rfcrSave & ~RFEN);
 
        /* load MAC addr to filter data register */
        for (i = 0 ; i < 3 ; i++) {
-               outl((i << RFADDR_shift), ioaddr + rfcr);
-               *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+               sw32(rfcr, (i << RFADDR_shift));
+               *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
        }
 
        /* Store MAC Address in perm_addr */
        memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
 
        /* enable packet filtering */
-       outl(rfcrSave | RFEN, rfcr + ioaddr);
+       sw32(rfcr, rfcrSave | RFEN);
 
        return 1;
 }
@@ -371,31 +380,30 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
 static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
                                        struct net_device *net_dev)
 {
-       long ioaddr = net_dev->base_addr;
-       long ee_addr = ioaddr + mear;
-       u32 waittime = 0;
-       int i;
+       struct sis900_private *sis_priv = netdev_priv(net_dev);
+       void __iomem *ioaddr = sis_priv->ioaddr;
+       int wait, rc = 0;
 
-       outl(EEREQ, ee_addr);
-       while(waittime < 2000) {
-               if(inl(ee_addr) & EEGNT) {
+       sw32(mear, EEREQ);
+       for (wait = 0; wait < 2000; wait++) {
+               if (sr32(mear) & EEGNT) {
+                       u16 *mac = (u16 *)net_dev->dev_addr;
+                       int i;
 
                        /* get MAC address from EEPROM */
                        for (i = 0; i < 3; i++)
-                               ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+                               mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
 
                        /* Store MAC Address in perm_addr */
                        memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
 
-                       outl(EEDONE, ee_addr);
-                       return 1;
-               } else {
-                       udelay(1);
-                       waittime ++;
+                       rc = 1;
+                       break;
                }
+               udelay(1);
        }
-       outl(EEDONE, ee_addr);
-       return 0;
+       sw32(mear, EEDONE);
+       return rc;
 }
 
 static const struct net_device_ops sis900_netdev_ops = {
@@ -433,7 +441,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        struct pci_dev *dev;
        dma_addr_t ring_dma;
        void *ring_space;
-       long ioaddr;
+       void __iomem *ioaddr;
        int i, ret;
        const char *card_name = card_names[pci_id->driver_data];
        const char *dev_name = pci_name(pci_dev);
@@ -464,14 +472,17 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
 
        /* We do a request_region() to register /proc/ioports info. */
-       ioaddr = pci_resource_start(pci_dev, 0);
        ret = pci_request_regions(pci_dev, "sis900");
        if (ret)
                goto err_out;
 
+       /* IO region. */
+       ioaddr = pci_iomap(pci_dev, 0, 0);
+       if (!ioaddr)
+               goto err_out_cleardev;
+
        sis_priv = netdev_priv(net_dev);
-       net_dev->base_addr = ioaddr;
-       net_dev->irq = pci_dev->irq;
+       sis_priv->ioaddr = ioaddr;
        sis_priv->pci_dev = pci_dev;
        spin_lock_init(&sis_priv->lock);
 
@@ -480,7 +491,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space) {
                ret = -ENOMEM;
-               goto err_out_cleardev;
+               goto err_out_unmap;
        }
        sis_priv->tx_ring = ring_space;
        sis_priv->tx_ring_dma = ring_dma;
@@ -534,7 +545,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
 
        /* 630ET : set the mii access mode as software-mode */
        if (sis_priv->chipset_rev == SIS630ET_900_REV)
-               outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+               sw32(cr, ACCESSMODE | sr32(cr));
 
        /* probe for mii transceiver */
        if (sis900_mii_probe(net_dev) == 0) {
@@ -556,25 +567,27 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
                goto err_unmap_rx;
 
        /* print some information about our NIC */
-       printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
-              net_dev->name, card_name, ioaddr, net_dev->irq,
+       printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
+              net_dev->name, card_name, ioaddr, pci_dev->irq,
               net_dev->dev_addr);
 
        /* Detect Wake on Lan support */
-       ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
+       ret = (sr32(CFGPMC) & PMESP) >> 27;
        if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
                printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
 
        return 0;
 
- err_unmap_rx:
+err_unmap_rx:
        pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
                sis_priv->rx_ring_dma);
- err_unmap_tx:
+err_unmap_tx:
        pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
                sis_priv->tx_ring_dma);
- err_out_cleardev:
-       pci_set_drvdata(pci_dev, NULL);
+err_out_unmap:
+       pci_iounmap(pci_dev, ioaddr);
+err_out_cleardev:
+       pci_set_drvdata(pci_dev, NULL);
        pci_release_regions(pci_dev);
  err_out:
        free_netdev(net_dev);
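With pci_iomap() added to probe above, the unwind chain gains an err_out_unmap step so any later failure also releases the mapping. A sketch of that goto ladder under the usual reverse-order rule, with hypothetical names and a simplified probe signature:

    #include <linux/errno.h>
    #include <linux/pci.h>

    static int foo_alloc_rings(struct pci_dev *pdev)    /* hypothetical later setup step */
    {
            return 0;
    }

    static int foo_probe(struct pci_dev *pdev)
    {
            void __iomem *ioaddr;
            int rc;

            rc = pci_enable_device(pdev);
            if (rc)
                    return rc;

            rc = pci_request_regions(pdev, "foo");
            if (rc)
                    goto err_disable;

            ioaddr = pci_iomap(pdev, 0, 0);
            if (!ioaddr) {
                    rc = -EIO;
                    goto err_release;
            }

            rc = foo_alloc_rings(pdev);
            if (rc)
                    goto err_unmap;

            return 0;

    err_unmap:
            pci_iounmap(pdev, ioaddr);          /* undo pci_iomap() */
    err_release:
            pci_release_regions(pdev);
    err_disable:
            pci_disable_device(pdev);
            return rc;
    }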
@@ -798,7 +811,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
 
 
 /* Delay between EEPROM clock transitions. */
-#define eeprom_delay()  inl(ee_addr)
+#define eeprom_delay() sr32(mear)
 
 /**
  *     read_eeprom - Read Serial EEPROM
@@ -809,41 +822,41 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
  *     Note that location is in word (16 bits) unit
  */
 
-static u16 __devinit read_eeprom(long ioaddr, int location)
+static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
 {
+       u32 read_cmd = location | EEread;
        int i;
        u16 retval = 0;
-       long ee_addr = ioaddr + mear;
-       u32 read_cmd = location | EEread;
 
-       outl(0, ee_addr);
+       sw32(mear, 0);
        eeprom_delay();
-       outl(EECS, ee_addr);
+       sw32(mear, EECS);
        eeprom_delay();
 
        /* Shift the read command (9) bits out. */
        for (i = 8; i >= 0; i--) {
                u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
-               outl(dataval, ee_addr);
+
+               sw32(mear, dataval);
                eeprom_delay();
-               outl(dataval | EECLK, ee_addr);
+               sw32(mear, dataval | EECLK);
                eeprom_delay();
        }
-       outl(EECS, ee_addr);
+       sw32(mear, EECS);
        eeprom_delay();
 
        /* read the 16-bits data in */
        for (i = 16; i > 0; i--) {
-               outl(EECS, ee_addr);
+               sw32(mear, EECS);
                eeprom_delay();
-               outl(EECS | EECLK, ee_addr);
+               sw32(mear, EECS | EECLK);
                eeprom_delay();
-               retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+               retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
                eeprom_delay();
        }
 
        /* Terminate the EEPROM access. */
-       outl(0, ee_addr);
+       sw32(mear, 0);
        eeprom_delay();
 
        return retval;
@@ -852,24 +865,27 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
 /* Read and write the MII management registers using software-generated
    serial MDIO protocol. Note that the command bits and data bits are
    send out separately */
-#define mdio_delay()    inl(mdio_addr)
+#define mdio_delay()   sr32(mear)
 
-static void mdio_idle(long mdio_addr)
+static void mdio_idle(struct sis900_private *sp)
 {
-       outl(MDIO | MDDIR, mdio_addr);
+       void __iomem *ioaddr = sp->ioaddr;
+
+       sw32(mear, MDIO | MDDIR);
        mdio_delay();
-       outl(MDIO | MDDIR | MDC, mdio_addr);
+       sw32(mear, MDIO | MDDIR | MDC);
 }
 
-/* Syncronize the MII management interface by shifting 32 one bits out. */
-static void mdio_reset(long mdio_addr)
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(struct sis900_private *sp)
 {
+       void __iomem *ioaddr = sp->ioaddr;
        int i;
 
        for (i = 31; i >= 0; i--) {
-               outl(MDDIR | MDIO, mdio_addr);
+               sw32(mear, MDDIR | MDIO);
                mdio_delay();
-               outl(MDDIR | MDIO | MDC, mdio_addr);
+               sw32(mear, MDDIR | MDIO | MDC);
                mdio_delay();
        }
 }
@@ -887,31 +903,33 @@ static void mdio_reset(long mdio_addr)
 
 static int mdio_read(struct net_device *net_dev, int phy_id, int location)
 {
-       long mdio_addr = net_dev->base_addr + mear;
        int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+       struct sis900_private *sp = netdev_priv(net_dev);
+       void __iomem *ioaddr = sp->ioaddr;
        u16 retval = 0;
        int i;
 
-       mdio_reset(mdio_addr);
-       mdio_idle(mdio_addr);
+       mdio_reset(sp);
+       mdio_idle(sp);
 
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
-               outl(dataval, mdio_addr);
+
+               sw32(mear, dataval);
                mdio_delay();
-               outl(dataval | MDC, mdio_addr);
+               sw32(mear, dataval | MDC);
                mdio_delay();
        }
 
        /* Read the 16 data bits. */
        for (i = 16; i > 0; i--) {
-               outl(0, mdio_addr);
+               sw32(mear, 0);
                mdio_delay();
-               retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
-               outl(MDC, mdio_addr);
+               retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
+               sw32(mear, MDC);
                mdio_delay();
        }
-       outl(0x00, mdio_addr);
+       sw32(mear, 0x00);
 
        return retval;
 }
@@ -931,19 +949,21 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location)
 static void mdio_write(struct net_device *net_dev, int phy_id, int location,
                        int value)
 {
-       long mdio_addr = net_dev->base_addr + mear;
        int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+       struct sis900_private *sp = netdev_priv(net_dev);
+       void __iomem *ioaddr = sp->ioaddr;
        int i;
 
-       mdio_reset(mdio_addr);
-       mdio_idle(mdio_addr);
+       mdio_reset(sp);
+       mdio_idle(sp);
 
        /* Shift the command bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
-               outb(dataval, mdio_addr);
+
+               sw8(mear, dataval);
                mdio_delay();
-               outb(dataval | MDC, mdio_addr);
+               sw8(mear, dataval | MDC);
                mdio_delay();
        }
        mdio_delay();
@@ -951,21 +971,22 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
        /* Shift the value bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
-               outl(dataval, mdio_addr);
+
+               sw32(mear, dataval);
                mdio_delay();
-               outl(dataval | MDC, mdio_addr);
+               sw32(mear, dataval | MDC);
                mdio_delay();
        }
        mdio_delay();
 
        /* Clear out extra bits. */
        for (i = 2; i > 0; i--) {
-               outb(0, mdio_addr);
+               sw8(mear, 0);
                mdio_delay();
-               outb(MDC, mdio_addr);
+               sw8(mear, MDC);
                mdio_delay();
        }
-       outl(0x00, mdio_addr);
+       sw32(mear, 0x00);
 }
 
 
@@ -1000,9 +1021,12 @@ static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
 */
 static void sis900_poll(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       sis900_interrupt(dev->irq, dev);
-       enable_irq(dev->irq);
+       struct sis900_private *sp = netdev_priv(dev);
+       const int irq = sp->pci_dev->irq;
+
+       disable_irq(irq);
+       sis900_interrupt(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -1018,7 +1042,7 @@ static int
 sis900_open(struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        int ret;
 
        /* Soft reset the chip. */
@@ -1027,8 +1051,8 @@ sis900_open(struct net_device *net_dev)
        /* Equalizer workaround Rule */
        sis630_set_eq(net_dev, sis_priv->chipset_rev);
 
-       ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED,
-                                               net_dev->name, net_dev);
+       ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
+                         net_dev->name, net_dev);
        if (ret)
                return ret;
 
@@ -1042,12 +1066,12 @@ sis900_open(struct net_device *net_dev)
        netif_start_queue(net_dev);
 
        /* Workaround for EDB */
-       sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+       sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
        /* Enable all known interrupts by setting the interrupt mask. */
-       outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
-       outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
-       outl(IE, ioaddr + ier);
+       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+       sw32(cr, RxENA | sr32(cr));
+       sw32(ier, IE);
 
        sis900_check_mode(net_dev, sis_priv->mii);
 
@@ -1074,31 +1098,30 @@ static void
 sis900_init_rxfilter (struct net_device * net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u32 rfcrSave;
        u32 i;
 
-       rfcrSave = inl(rfcr + ioaddr);
+       rfcrSave = sr32(rfcr);
 
        /* disable packet filtering before setting filter */
-       outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+       sw32(rfcr, rfcrSave & ~RFEN);
 
        /* load MAC addr to filter data register */
        for (i = 0 ; i < 3 ; i++) {
-               u32 w;
+               u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
 
-               w = (u32) *((u16 *)(net_dev->dev_addr)+i);
-               outl((i << RFADDR_shift), ioaddr + rfcr);
-               outl(w, ioaddr + rfdr);
+               sw32(rfcr, i << RFADDR_shift);
+               sw32(rfdr, w);
 
                if (netif_msg_hw(sis_priv)) {
                        printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
-                              net_dev->name, i, inl(ioaddr + rfdr));
+                              net_dev->name, i, sr32(rfdr));
                }
        }
 
        /* enable packet filtering */
-       outl(rfcrSave | RFEN, rfcr + ioaddr);
+       sw32(rfcr, rfcrSave | RFEN);
 }
 
 /**
@@ -1112,7 +1135,7 @@ static void
 sis900_init_tx_ring(struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        int i;
 
        sis_priv->tx_full = 0;
@@ -1128,10 +1151,10 @@ sis900_init_tx_ring(struct net_device *net_dev)
        }
 
        /* load Transmit Descriptor Register */
-       outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+       sw32(txdp, sis_priv->tx_ring_dma);
        if (netif_msg_hw(sis_priv))
                printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
-                      net_dev->name, inl(ioaddr + txdp));
+                      net_dev->name, sr32(txdp));
 }
 
 /**
@@ -1146,7 +1169,7 @@ static void
 sis900_init_rx_ring(struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        int i;
 
        sis_priv->cur_rx = 0;
@@ -1181,10 +1204,10 @@ sis900_init_rx_ring(struct net_device *net_dev)
        sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 
        /* load Receive Descriptor Register */
-       outl(sis_priv->rx_ring_dma, ioaddr + rxdp);
+       sw32(rxdp, sis_priv->rx_ring_dma);
        if (netif_msg_hw(sis_priv))
                printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
-                      net_dev->name, inl(ioaddr + rxdp));
+                      net_dev->name, sr32(rxdp));
 }
 
 /**
@@ -1298,7 +1321,7 @@ static void sis900_timer(unsigned long data)
 
                sis900_read_mode(net_dev, &speed, &duplex);
                if (duplex){
-                       sis900_set_mode(net_dev->base_addr, speed, duplex);
+                       sis900_set_mode(sis_priv, speed, duplex);
                        sis630_set_eq(net_dev, sis_priv->chipset_rev);
                        netif_start_queue(net_dev);
                }
@@ -1359,25 +1382,25 @@ static void sis900_timer(unsigned long data)
 static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        int speed, duplex;
 
        if (mii_phy->phy_types == LAN) {
-               outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg);
+               sw32(cfg, ~EXD & sr32(cfg));
                sis900_set_capability(net_dev , mii_phy);
                sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
        } else {
-               outl(EXD | inl(ioaddr + cfg), ioaddr + cfg);
+               sw32(cfg, EXD | sr32(cfg));
                speed = HW_SPEED_HOME;
                duplex = FDX_CAPABLE_HALF_SELECTED;
-               sis900_set_mode(ioaddr, speed, duplex);
+               sis900_set_mode(sis_priv, speed, duplex);
                sis_priv->autong_complete = 1;
        }
 }
 
 /**
  *     sis900_set_mode - Set the media mode of mac register.
- *     @ioaddr: the address of the device
+ *     @sp:     the device private data
  *     @speed : the transmit speed to be determined
  *     @duplex: the duplex mode to be determined
  *
@@ -1388,11 +1411,12 @@ static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_ph
  *     double words.
  */
 
-static void sis900_set_mode (long ioaddr, int speed, int duplex)
+static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
 {
+       void __iomem *ioaddr = sp->ioaddr;
        u32 tx_flags = 0, rx_flags = 0;
 
-       if (inl(ioaddr + cfg) & EDB_MASTER_EN) {
+       if (sr32(cfg) & EDB_MASTER_EN) {
                tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
                                        (TX_FILL_THRESH << TxFILLT_shift);
                rx_flags = DMA_BURST_64 << RxMXDMA_shift;
@@ -1420,8 +1444,8 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
        rx_flags |= RxAJAB;
 #endif
 
-       outl (tx_flags, ioaddr + txcfg);
-       outl (rx_flags, ioaddr + rxcfg);
+       sw32(txcfg, tx_flags);
+       sw32(rxcfg, rx_flags);
 }
 
 /**
@@ -1528,16 +1552,17 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
 static void sis900_tx_timeout(struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        unsigned long flags;
        int i;
 
-       if(netif_msg_tx_err(sis_priv))
+       if (netif_msg_tx_err(sis_priv)) {
                printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
-                       net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+                       net_dev->name, sr32(cr), sr32(isr));
+       }
 
        /* Disable interrupts by clearing the interrupt mask. */
-       outl(0x0000, ioaddr + imr);
+       sw32(imr, 0x0000);
 
        /* use spinlock to prevent interrupt handler accessing buffer ring */
        spin_lock_irqsave(&sis_priv->lock, flags);
@@ -1566,10 +1591,10 @@ static void sis900_tx_timeout(struct net_device *net_dev)
        net_dev->trans_start = jiffies; /* prevent tx timeout */
 
        /* load Transmit Descriptor Register */
-       outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+       sw32(txdp, sis_priv->tx_ring_dma);
 
        /* Enable all known interrupts by setting the interrupt mask. */
-       outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
 }
 
 /**
@@ -1586,7 +1611,7 @@ static netdev_tx_t
 sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        unsigned int  entry;
        unsigned long flags;
        unsigned int  index_cur_tx, index_dirty_tx;
@@ -1608,7 +1633,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
        sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
                skb->data, skb->len, PCI_DMA_TODEVICE);
        sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
-       outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+       sw32(cr, TxENA | sr32(cr));
 
        sis_priv->cur_tx ++;
        index_cur_tx = sis_priv->cur_tx;
@@ -1654,14 +1679,14 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
        struct net_device *net_dev = dev_instance;
        struct sis900_private *sis_priv = netdev_priv(net_dev);
        int boguscnt = max_interrupt_work;
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u32 status;
        unsigned int handled = 0;
 
        spin_lock (&sis_priv->lock);
 
        do {
-               status = inl(ioaddr + isr);
+               status = sr32(isr);
 
                if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
                        /* nothing intresting happened */
@@ -1696,7 +1721,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
        if(netif_msg_intr(sis_priv))
                printk(KERN_DEBUG "%s: exiting interrupt, "
                       "interrupt status = 0x%#8.8x.\n",
-                      net_dev->name, inl(ioaddr + isr));
+                      net_dev->name, sr32(isr));
 
        spin_unlock (&sis_priv->lock);
        return IRQ_RETVAL(handled);
@@ -1715,7 +1740,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
 static int sis900_rx(struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
        u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
        int rx_work_limit;
@@ -1847,7 +1872,7 @@ refill_rx_ring:
                }
        }
        /* re-enable the potentially idle receive state matchine */
-       outl(RxENA | inl(ioaddr + cr), ioaddr + cr );
+       sw32(cr, RxENA | sr32(cr));
 
        return 0;
 }
@@ -1932,31 +1957,31 @@ static void sis900_finish_xmit (struct net_device *net_dev)
 
 static int sis900_close(struct net_device *net_dev)
 {
-       long ioaddr = net_dev->base_addr;
        struct sis900_private *sis_priv = netdev_priv(net_dev);
+       struct pci_dev *pdev = sis_priv->pci_dev;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        struct sk_buff *skb;
        int i;
 
        netif_stop_queue(net_dev);
 
        /* Disable interrupts by clearing the interrupt mask. */
-       outl(0x0000, ioaddr + imr);
-       outl(0x0000, ioaddr + ier);
+       sw32(imr, 0x0000);
+       sw32(ier, 0x0000);
 
        /* Stop the chip's Tx and Rx Status Machine */
-       outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+       sw32(cr, RxDIS | TxDIS | sr32(cr));
 
        del_timer(&sis_priv->timer);
 
-       free_irq(net_dev->irq, net_dev);
+       free_irq(pdev->irq, net_dev);
 
        /* Free Tx and RX skbuff */
        for (i = 0; i < NUM_RX_DESC; i++) {
                skb = sis_priv->rx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(sis_priv->pci_dev,
-                               sis_priv->rx_ring[i].bufptr,
-                               RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
+                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(skb);
                        sis_priv->rx_skbuff[i] = NULL;
                }
@@ -1964,9 +1989,8 @@ static int sis900_close(struct net_device *net_dev)
        for (i = 0; i < NUM_TX_DESC; i++) {
                skb = sis_priv->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(sis_priv->pci_dev,
-                               sis_priv->tx_ring[i].bufptr, skb->len,
-                               PCI_DMA_TODEVICE);
+                       pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
+                                        skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb(skb);
                        sis_priv->tx_skbuff[i] = NULL;
                }
@@ -2055,14 +2079,14 @@ static int sis900_nway_reset(struct net_device *net_dev)
 static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long pmctrl_addr = net_dev->base_addr + pmctrl;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u32 cfgpmcsr = 0, pmctrl_bits = 0;
 
        if (wol->wolopts == 0) {
                pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
                cfgpmcsr &= ~PME_EN;
                pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
-               outl(pmctrl_bits, pmctrl_addr);
+               sw32(pmctrl, pmctrl_bits);
                if (netif_msg_wol(sis_priv))
                        printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
                return 0;
@@ -2077,7 +2101,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
        if (wol->wolopts & WAKE_PHY)
                pmctrl_bits |= LINKON;
 
-       outl(pmctrl_bits, pmctrl_addr);
+       sw32(pmctrl, pmctrl_bits);
 
        pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
        cfgpmcsr |= PME_EN;
@@ -2090,10 +2114,11 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
 
 static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
 {
-       long pmctrl_addr = net_dev->base_addr + pmctrl;
+       struct sis900_private *sp = netdev_priv(net_dev);
+       void __iomem *ioaddr = sp->ioaddr;
        u32 pmctrl_bits;
 
-       pmctrl_bits = inl(pmctrl_addr);
+       pmctrl_bits = sr32(pmctrl);
        if (pmctrl_bits & MAGICPKT)
                wol->wolopts |= WAKE_MAGIC;
        if (pmctrl_bits & LINKON)
@@ -2279,8 +2304,8 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
 
 static void set_rx_mode(struct net_device *net_dev)
 {
-       long ioaddr = net_dev->base_addr;
        struct sis900_private *sis_priv = netdev_priv(net_dev);
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u16 mc_filter[16] = {0};        /* 256/128 bits multicast hash table */
        int i, table_entries;
        u32 rx_mode;
@@ -2322,24 +2347,24 @@ static void set_rx_mode(struct net_device *net_dev)
        /* update Multicast Hash Table in Receive Filter */
        for (i = 0; i < table_entries; i++) {
                 /* why plus 0x04 ??, That makes the correct value for hash table. */
-               outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
-               outl(mc_filter[i], ioaddr + rfdr);
+               sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
+               sw32(rfdr, mc_filter[i]);
        }
 
-       outl(RFEN | rx_mode, ioaddr + rfcr);
+       sw32(rfcr, RFEN | rx_mode);
 
        /* sis900 is capable of looping back packets at MAC level for
         * debugging purpose */
        if (net_dev->flags & IFF_LOOPBACK) {
                u32 cr_saved;
                /* We must disable Tx/Rx before setting loopback mode */
-               cr_saved = inl(ioaddr + cr);
-               outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+               cr_saved = sr32(cr);
+               sw32(cr, cr_saved | TxDIS | RxDIS);
                /* enable loopback */
-               outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
-               outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+               sw32(txcfg, sr32(txcfg) | TxMLB);
+               sw32(rxcfg, sr32(rxcfg) | RxATX);
                /* restore cr */
-               outl(cr_saved, ioaddr + cr);
+               sw32(cr, cr_saved);
        }
 }
 
@@ -2355,26 +2380,25 @@ static void set_rx_mode(struct net_device *net_dev)
 static void sis900_reset(struct net_device *net_dev)
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
-       int i = 0;
+       void __iomem *ioaddr = sis_priv->ioaddr;
        u32 status = TxRCMP | RxRCMP;
+       int i;
 
-       outl(0, ioaddr + ier);
-       outl(0, ioaddr + imr);
-       outl(0, ioaddr + rfcr);
+       sw32(ier, 0);
+       sw32(imr, 0);
+       sw32(rfcr, 0);
 
-       outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+       sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
 
        /* Check that the chip has finished the reset. */
-       while (status && (i++ < 1000)) {
-               status ^= (inl(isr + ioaddr) & status);
-       }
+       for (i = 0; status && (i < 1000); i++)
+               status ^= sr32(isr) & status;
 
-       if( (sis_priv->chipset_rev >= SIS635A_900_REV) ||
-                       (sis_priv->chipset_rev == SIS900B_900_REV) )
-               outl(PESEL | RND_CNT, ioaddr + cfg);
+       if (sis_priv->chipset_rev >= SIS635A_900_REV ||
+           sis_priv->chipset_rev == SIS900B_900_REV)
+               sw32(cfg, PESEL | RND_CNT);
        else
-               outl(PESEL, ioaddr + cfg);
+               sw32(cfg, PESEL);
 }
 
 /**
@@ -2388,10 +2412,12 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
 {
        struct net_device *net_dev = pci_get_drvdata(pci_dev);
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       struct mii_phy *phy = NULL;
+
+       unregister_netdev(net_dev);
 
        while (sis_priv->first_mii) {
-               phy = sis_priv->first_mii;
+               struct mii_phy *phy = sis_priv->first_mii;
+
                sis_priv->first_mii = phy->next;
                kfree(phy);
        }
@@ -2400,7 +2426,7 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
                sis_priv->rx_ring_dma);
        pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
                sis_priv->tx_ring_dma);
-       unregister_netdev(net_dev);
+       pci_iounmap(pci_dev, sis_priv->ioaddr);
        free_netdev(net_dev);
        pci_release_regions(pci_dev);
        pci_set_drvdata(pci_dev, NULL);
@@ -2411,7 +2437,8 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
 static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
 {
        struct net_device *net_dev = pci_get_drvdata(pci_dev);
-       long ioaddr = net_dev->base_addr;
+       struct sis900_private *sis_priv = netdev_priv(net_dev);
+       void __iomem *ioaddr = sis_priv->ioaddr;
 
        if(!netif_running(net_dev))
                return 0;
@@ -2420,7 +2447,7 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
        netif_device_detach(net_dev);
 
        /* Stop the chip's Tx and Rx Status Machine */
-       outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+       sw32(cr, RxDIS | TxDIS | sr32(cr));
 
        pci_set_power_state(pci_dev, PCI_D3hot);
        pci_save_state(pci_dev);
@@ -2432,7 +2459,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
 {
        struct net_device *net_dev = pci_get_drvdata(pci_dev);
        struct sis900_private *sis_priv = netdev_priv(net_dev);
-       long ioaddr = net_dev->base_addr;
+       void __iomem *ioaddr = sis_priv->ioaddr;
 
        if(!netif_running(net_dev))
                return 0;
@@ -2453,9 +2480,9 @@ static int sis900_resume(struct pci_dev *pci_dev)
-       sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+       sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
        /* Enable all known interrupts by setting the interrupt mask. */
-       outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
-       outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
-       outl(IE, ioaddr + ier);
+       sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+       sw32(cr, RxENA | sr32(cr));
+       sw32(ier, IE);
 
        sis900_check_mode(net_dev, sis_priv->mii);
 
index 2a662e6112e9b15cbb8dcda6e30bfc9f9c70fb44..d01e59c348ad4d75be016850070e8ba03ccd30c7 100644 (file)
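epic100 used to toggle between port I/O and MMIO by redefining inl()/outl(); the hunks below reduce that choice to picking a BAR for pci_iomap(), after which a single set of ioread/iowrite accessors covers both cases. A minimal sketch of the same idea with placeholder names:

    #include <linux/io.h>
    #include <linux/pci.h>

    #ifdef FOO_USE_IO_OPS                       /* hypothetical compile-time switch */
    #define FOO_BAR 0                           /* I/O-port BAR */
    #else
    #define FOO_BAR 1                           /* memory BAR */
    #endif

    static void __iomem *foo_map_regs(struct pci_dev *pdev)
    {
            /* pci_iomap() hides the PIO/MMIO difference behind one cookie */
            return pci_iomap(pdev, FOO_BAR, 0);
    }

The matching teardown is pci_iounmap(), regardless of which BAR was mapped.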
@@ -146,6 +146,12 @@ enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
 #define EPIC_TOTAL_SIZE 0x100
 #define USE_IO_OPS 1
 
+#ifdef USE_IO_OPS
+#define EPIC_BAR       0
+#else
+#define EPIC_BAR       1
+#endif
+
 typedef enum {
        SMSC_83C170_0,
        SMSC_83C170,
@@ -176,21 +182,11 @@ static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 
-
-#ifndef USE_IO_OPS
-#undef inb
-#undef inw
-#undef inl
-#undef outb
-#undef outw
-#undef outl
-#define inb readb
-#define inw readw
-#define inl readl
-#define outb writeb
-#define outw writew
-#define outl writel
-#endif
+#define ew16(reg, val) iowrite16(val, ioaddr + (reg))
+#define ew32(reg, val) iowrite32(val, ioaddr + (reg))
+#define er8(reg)       ioread8(ioaddr + (reg))
+#define er16(reg)      ioread16(ioaddr + (reg))
+#define er32(reg)      ioread32(ioaddr + (reg))
 
 /* Offsets to registers, using the (ugh) SMC names. */
 enum epic_registers {
@@ -275,6 +271,7 @@ struct epic_private {
        u32 irq_mask;
        unsigned int rx_buf_sz;                         /* Based on MTU+slack. */
 
+       void __iomem *ioaddr;
        struct pci_dev *pci_dev;                        /* PCI bus location. */
        int chip_id, chip_flags;
 
@@ -290,7 +287,7 @@ struct epic_private {
 };
 
 static int epic_open(struct net_device *dev);
-static int read_eeprom(long ioaddr, int location);
+static int read_eeprom(struct epic_private *, int);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
 static void epic_restart(struct net_device *dev);
@@ -321,11 +318,11 @@ static const struct net_device_ops epic_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-static int __devinit epic_init_one (struct pci_dev *pdev,
-                                   const struct pci_device_id *ent)
+static int __devinit epic_init_one(struct pci_dev *pdev,
+                                  const struct pci_device_id *ent)
 {
        static int card_idx = -1;
-       long ioaddr;
+       void __iomem *ioaddr;
        int chip_idx = (int) ent->driver_data;
        int irq;
        struct net_device *dev;
@@ -368,19 +365,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-#ifdef USE_IO_OPS
-       ioaddr = pci_resource_start (pdev, 0);
-#else
-       ioaddr = pci_resource_start (pdev, 1);
-       ioaddr = (long) pci_ioremap_bar(pdev, 1);
+       ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
        if (!ioaddr) {
                dev_err(&pdev->dev, "ioremap failed\n");
                goto err_out_free_netdev;
        }
-#endif
 
        pci_set_drvdata(pdev, dev);
        ep = netdev_priv(dev);
+       ep->ioaddr = ioaddr;
        ep->mii.dev = dev;
        ep->mii.mdio_read = mdio_read;
        ep->mii.mdio_write = mdio_write;
@@ -409,34 +402,31 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
                        duplex = full_duplex[card_idx];
        }
 
-       dev->base_addr = ioaddr;
-       dev->irq = irq;
-
        spin_lock_init(&ep->lock);
        spin_lock_init(&ep->napi_lock);
        ep->reschedule_in_poll = 0;
 
        /* Bring the chip out of low-power mode. */
-       outl(0x4200, ioaddr + GENCTL);
+       ew32(GENCTL, 0x4200);
        /* Magic?!  If we don't set this bit the MII interface won't work. */
        /* This magic is documented in SMSC app note 7.15 */
        for (i = 16; i > 0; i--)
-               outl(0x0008, ioaddr + TEST1);
+               ew32(TEST1, 0x0008);
 
        /* Turn on the MII transceiver. */
-       outl(0x12, ioaddr + MIICfg);
+       ew32(MIICfg, 0x12);
        if (chip_idx == 1)
-               outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
-       outl(0x0200, ioaddr + GENCTL);
+               ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
+       ew32(GENCTL, 0x0200);
 
        /* Note: the '175 does not have a serial EEPROM. */
        for (i = 0; i < 3; i++)
-               ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
+               ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
 
        if (debug > 2) {
                dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
                for (i = 0; i < 64; i++)
-                       printk(" %4.4x%s", read_eeprom(ioaddr, i),
+                       printk(" %4.4x%s", read_eeprom(ep, i),
                                   i % 16 == 15 ? "\n" : "");
        }
 
@@ -481,8 +471,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 
        /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
        if (ep->chip_flags & MII_PWRDWN)
-               outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
-       outl(0x0008, ioaddr + GENCTL);
+               ew32(NVCTL, er32(NVCTL) & ~0x483c);
+       ew32(GENCTL, 0x0008);
 
        /* The lower four bits are the media type. */
        if (duplex) {
@@ -501,8 +491,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
        if (ret < 0)
                goto err_out_unmap_rx;
 
-       printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
-              dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
+       printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
+              dev->name, pci_id_tbl[chip_idx].name,
+              (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
               dev->dev_addr);
 
 out:
@@ -513,10 +504,8 @@ err_out_unmap_rx:
 err_out_unmap_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 err_out_iounmap:
-#ifndef USE_IO_OPS
-       iounmap(ioaddr);
+       pci_iounmap(pdev, ioaddr);
 err_out_free_netdev:
-#endif
        free_netdev(dev);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -540,7 +529,7 @@ err_out_disable:
    This serves to flush the operation to the PCI bus.
  */
 
-#define eeprom_delay() inl(ee_addr)
+#define eeprom_delay() er32(EECTL)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_WRITE_CMD   (5 << 6)
@@ -550,67 +539,67 @@ err_out_disable:
 
 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 {
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
 
-       outl(0x00000000, ioaddr + INTMASK);
+       ew32(INTMASK, 0x00000000);
 }
 
-static inline void __epic_pci_commit(long ioaddr)
+static inline void __epic_pci_commit(void __iomem *ioaddr)
 {
 #ifndef USE_IO_OPS
-       inl(ioaddr + INTMASK);
+       er32(INTMASK);
 #endif
 }
 
 static inline void epic_napi_irq_off(struct net_device *dev,
                                     struct epic_private *ep)
 {
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
 
-       outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+       ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
        __epic_pci_commit(ioaddr);
 }
 
 static inline void epic_napi_irq_on(struct net_device *dev,
                                    struct epic_private *ep)
 {
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
 
        /* No need to commit possible posted write */
-       outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+       ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
 }
 
-static int __devinit read_eeprom(long ioaddr, int location)
+static int __devinit read_eeprom(struct epic_private *ep, int location)
 {
+       void __iomem *ioaddr = ep->ioaddr;
        int i;
        int retval = 0;
-       long ee_addr = ioaddr + EECTL;
        int read_cmd = location |
-               (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+               (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
 
-       outl(EE_ENB & ~EE_CS, ee_addr);
-       outl(EE_ENB, ee_addr);
+       ew32(EECTL, EE_ENB & ~EE_CS);
+       ew32(EECTL, EE_ENB);
 
        /* Shift the read command bits out. */
        for (i = 12; i >= 0; i--) {
                short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
-               outl(EE_ENB | dataval, ee_addr);
+               ew32(EECTL, EE_ENB | dataval);
                eeprom_delay();
-               outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+               ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
                eeprom_delay();
        }
-       outl(EE_ENB, ee_addr);
+       ew32(EECTL, EE_ENB);
 
        for (i = 16; i > 0; i--) {
-               outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+               ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
                eeprom_delay();
-               retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
-               outl(EE_ENB, ee_addr);
+               retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
+               ew32(EECTL, EE_ENB);
                eeprom_delay();
        }
 
        /* Terminate the EEPROM access. */
-       outl(EE_ENB & ~EE_CS, ee_addr);
+       ew32(EECTL, EE_ENB & ~EE_CS);
        return retval;
 }
 
@@ -618,22 +607,23 @@ static int __devinit read_eeprom(long ioaddr, int location)
 #define MII_WRITEOP            2
 static int mdio_read(struct net_device *dev, int phy_id, int location)
 {
-       long ioaddr = dev->base_addr;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
        int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
        int i;
 
-       outl(read_cmd, ioaddr + MIICtrl);
+       ew32(MIICtrl, read_cmd);
        /* Typical operation takes 25 loops. */
        for (i = 400; i > 0; i--) {
                barrier();
-               if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
+               if ((er32(MIICtrl) & MII_READOP) == 0) {
                        /* Work around read failure bug. */
                        if (phy_id == 1 && location < 6 &&
-                           inw(ioaddr + MIIData) == 0xffff) {
-                               outl(read_cmd, ioaddr + MIICtrl);
+                           er16(MIIData) == 0xffff) {
+                               ew32(MIICtrl, read_cmd);
                                continue;
                        }
-                       return inw(ioaddr + MIIData);
+                       return er16(MIIData);
                }
        }
        return 0xffff;
@@ -641,14 +631,15 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 {
-       long ioaddr = dev->base_addr;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
        int i;
 
-       outw(value, ioaddr + MIIData);
-       outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+       ew16(MIIData, value);
+       ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
        for (i = 10000; i > 0; i--) {
                barrier();
-               if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+               if ((er32(MIICtrl) & MII_WRITEOP) == 0)
                        break;
        }
 }
@@ -657,25 +648,26 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 static int epic_open(struct net_device *dev)
 {
        struct epic_private *ep = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
-       int i;
-       int retval;
+       void __iomem *ioaddr = ep->ioaddr;
+       const int irq = ep->pci_dev->irq;
+       int rc, i;
 
        /* Soft reset the chip. */
-       outl(0x4001, ioaddr + GENCTL);
+       ew32(GENCTL, 0x4001);
 
        napi_enable(&ep->napi);
-       if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
+       rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
+       if (rc) {
                napi_disable(&ep->napi);
-               return retval;
+               return rc;
        }
 
        epic_init_ring(dev);
 
-       outl(0x4000, ioaddr + GENCTL);
+       ew32(GENCTL, 0x4000);
        /* This magic is documented in SMSC app note 7.15 */
        for (i = 16; i > 0; i--)
-               outl(0x0008, ioaddr + TEST1);
+               ew32(TEST1, 0x0008);
 
        /* Pull the chip out of low-power mode, enable interrupts, and set for
           PCI read multiple.  The MIIcfg setting and strange write order are
@@ -683,29 +675,29 @@ static int epic_open(struct net_device *dev)
           wiring on the Ositech CardBus card.
        */
 #if 0
-       outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+       ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 #endif
        if (ep->chip_flags & MII_PWRDWN)
-               outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+               ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 
        /* Tell the chip to byteswap descriptors on big-endian hosts */
 #ifdef __BIG_ENDIAN
-       outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
-       inl(ioaddr + GENCTL);
-       outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+       ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
+       er32(GENCTL);
+       ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 #else
-       outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
-       inl(ioaddr + GENCTL);
-       outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+       ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
+       er32(GENCTL);
+       ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 #endif
 
        udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
 
        for (i = 0; i < 3; i++)
-               outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+               ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 
        ep->tx_threshold = TX_FIFO_THRESH;
-       outl(ep->tx_threshold, ioaddr + TxThresh);
+       ew32(TxThresh, ep->tx_threshold);
 
        if (media2miictl[dev->if_port & 15]) {
                if (ep->mii_phy_cnt)
@@ -731,26 +723,27 @@ static int epic_open(struct net_device *dev)
                }
        }
 
-       outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
-       outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
-       outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
+       ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
+       ew32(PRxCDAR, ep->rx_ring_dma);
+       ew32(PTxCDAR, ep->tx_ring_dma);
 
        /* Start the chip's Rx process. */
        set_rx_mode(dev);
-       outl(StartRx | RxQueued, ioaddr + COMMAND);
+       ew32(COMMAND, StartRx | RxQueued);
 
        netif_start_queue(dev);
 
        /* Enable interrupts by setting the interrupt mask. */
-       outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-                | CntFull | TxUnderrun
-                | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
-
-       if (debug > 1)
-               printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
-                          "%s-duplex.\n",
-                          dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
-                          ep->mii.full_duplex ? "full" : "half");
+       ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
+            ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
+            TxUnderrun);
+
+       if (debug > 1) {
+               printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
+                      "status %4.4x %s-duplex.\n",
+                      dev->name, ioaddr, irq, er32(GENCTL),
+                      ep->mii.full_duplex ? "full" : "half");
+       }
 
        /* Set the timer to switch to check for link beat and perhaps switch
           to an alternate media type. */
@@ -760,27 +753,29 @@ static int epic_open(struct net_device *dev)
        ep->timer.function = epic_timer;                                /* timer handler */
        add_timer(&ep->timer);
 
-       return 0;
+       return rc;
 }
 
 /* Reset the chip to recover from a PCI transaction error.
    This may occur at interrupt time. */
 static void epic_pause(struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
+       struct net_device_stats *stats = &dev->stats;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
 
        netif_stop_queue (dev);
 
        /* Disable interrupts by clearing the interrupt mask. */
-       outl(0x00000000, ioaddr + INTMASK);
+       ew32(INTMASK, 0x00000000);
        /* Stop the chip's Tx and Rx DMA processes. */
-       outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+       ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
 
        /* Update the error counts. */
-       if (inw(ioaddr + COMMAND) != 0xffff) {
-               dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-               dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-               dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+       if (er16(COMMAND) != 0xffff) {
+               stats->rx_missed_errors += er8(MPCNT);
+               stats->rx_frame_errors  += er8(ALICNT);
+               stats->rx_crc_errors    += er8(CRCCNT);
        }
 
        /* Remove the packets on the Rx queue. */
@@ -789,12 +784,12 @@ static void epic_pause(struct net_device *dev)
 
 static void epic_restart(struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
        struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
        int i;
 
        /* Soft reset the chip. */
-       outl(0x4001, ioaddr + GENCTL);
+       ew32(GENCTL, 0x4001);
 
        printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
                   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
@@ -802,47 +797,46 @@ static void epic_restart(struct net_device *dev)
 
        /* This magic is documented in SMSC app note 7.15 */
        for (i = 16; i > 0; i--)
-               outl(0x0008, ioaddr + TEST1);
+               ew32(TEST1, 0x0008);
 
 #ifdef __BIG_ENDIAN
-       outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+       ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 #else
-       outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+       ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 #endif
-       outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+       ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
        if (ep->chip_flags & MII_PWRDWN)
-               outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+               ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 
        for (i = 0; i < 3; i++)
-               outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+               ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 
        ep->tx_threshold = TX_FIFO_THRESH;
-       outl(ep->tx_threshold, ioaddr + TxThresh);
-       outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
-       outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
-               sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
-       outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
-                sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
+       ew32(TxThresh, ep->tx_threshold);
+       ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
+       ew32(PRxCDAR, ep->rx_ring_dma +
+            (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
+       ew32(PTxCDAR, ep->tx_ring_dma +
+            (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
 
        /* Start the chip's Rx process. */
        set_rx_mode(dev);
-       outl(StartRx | RxQueued, ioaddr + COMMAND);
+       ew32(COMMAND, StartRx | RxQueued);
 
        /* Enable interrupts by setting the interrupt mask. */
-       outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-                | CntFull | TxUnderrun
-                | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+       ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
+            ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
+            TxUnderrun);
 
        printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
                   " interrupt %4.4x.\n",
-                  dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
-                  (int)inl(ioaddr + INTSTAT));
+                  dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
 }
 
 static void check_media(struct net_device *dev)
 {
        struct epic_private *ep = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
        int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
        int negotiated = mii_lpa & ep->mii.advertising;
        int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
@@ -856,7 +850,7 @@ static void check_media(struct net_device *dev)
                printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
                           " partner capability of %4.4x.\n", dev->name,
                           ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
-               outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+               ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
        }
 }
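/*
 * [Editor's note, illustrative only -- not part of the patch] The
 * duplex test above can be read with the link-partner ability bits
 * from <linux/mii.h>: 0x0100 is LPA_100FULL, and the 0x01C0/0x0040
 * comparison means "10FULL resolved, but neither 100HALF nor
 * 100FULL", i.e. roughly:
 */
int duplex = (negotiated & LPA_100FULL) ||
	     (negotiated & (LPA_100FULL | LPA_100HALF | LPA_10FULL)) == LPA_10FULL;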
 
@@ -864,16 +858,15 @@ static void epic_timer(unsigned long data)
 {
        struct net_device *dev = (struct net_device *)data;
        struct epic_private *ep = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
        int next_tick = 5*HZ;
 
        if (debug > 3) {
                printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
-                          dev->name, (int)inl(ioaddr + TxSTAT));
+                      dev->name, er32(TxSTAT));
                printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
-                          "IntStatus %4.4x RxStatus %4.4x.\n",
-                          dev->name, (int)inl(ioaddr + INTMASK),
-                          (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+                      "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
+                      er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
        }
 
        check_media(dev);
@@ -885,23 +878,22 @@ static void epic_timer(unsigned long data)
 static void epic_tx_timeout(struct net_device *dev)
 {
        struct epic_private *ep = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
 
        if (debug > 0) {
                printk(KERN_WARNING "%s: Transmit timeout using MII device, "
-                          "Tx status %4.4x.\n",
-                          dev->name, (int)inw(ioaddr + TxSTAT));
+                      "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
                if (debug > 1) {
                        printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
                                   dev->name, ep->dirty_tx, ep->cur_tx);
                }
        }
-       if (inw(ioaddr + TxSTAT) & 0x10) {              /* Tx FIFO underflow. */
+       if (er16(TxSTAT) & 0x10) {              /* Tx FIFO underflow. */
                dev->stats.tx_fifo_errors++;
-               outl(RestartTx, ioaddr + COMMAND);
+               ew32(COMMAND, RestartTx);
        } else {
                epic_restart(dev);
-               outl(TxQueued, dev->base_addr + COMMAND);
+               ew32(COMMAND, TxQueued);
        }
 
        dev->trans_start = jiffies; /* prevent tx timeout */
@@ -959,6 +951,7 @@ static void epic_init_ring(struct net_device *dev)
 static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
        int entry, free_count;
        u32 ctrl_word;
        unsigned long flags;
@@ -999,13 +992,12 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        spin_unlock_irqrestore(&ep->lock, flags);
        /* Trigger an immediate transmit demand. */
-       outl(TxQueued, dev->base_addr + COMMAND);
+       ew32(COMMAND, TxQueued);
 
        if (debug > 4)
                printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
-                          "flag %2.2x Tx status %8.8x.\n",
-                          dev->name, (int)skb->len, entry, ctrl_word,
-                          (int)inl(dev->base_addr + TxSTAT));
+                      "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
+                      entry, ctrl_word, er32(TxSTAT));
 
        return NETDEV_TX_OK;
 }
@@ -1086,18 +1078,17 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
 {
        struct net_device *dev = dev_instance;
        struct epic_private *ep = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
        unsigned int handled = 0;
        int status;
 
-       status = inl(ioaddr + INTSTAT);
+       status = er32(INTSTAT);
        /* Acknowledge all of the current interrupt sources ASAP. */
-       outl(status & EpicNormalEvent, ioaddr + INTSTAT);
+       ew32(INTSTAT, status & EpicNormalEvent);
 
        if (debug > 4) {
                printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
-                                  "intstat=%#8.8x.\n", dev->name, status,
-                                  (int)inl(ioaddr + INTSTAT));
+                      "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
        }
 
        if ((status & IntrSummary) == 0)
@@ -1118,19 +1109,21 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
 
        /* Check uncommon events all at once. */
        if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+               struct net_device_stats *stats = &dev->stats;
+
                if (status == EpicRemoved)
                        goto out;
 
                /* Always update the error counts to avoid overhead later. */
-               dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-               dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-               dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+               stats->rx_missed_errors += er8(MPCNT);
+               stats->rx_frame_errors  += er8(ALICNT);
+               stats->rx_crc_errors    += er8(CRCCNT);
 
                if (status & TxUnderrun) { /* Tx FIFO underflow. */
-                       dev->stats.tx_fifo_errors++;
-                       outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+                       stats->tx_fifo_errors++;
+                       ew32(TxThresh, ep->tx_threshold += 128);
                        /* Restart the transmit process. */
-                       outl(RestartTx, ioaddr + COMMAND);
+                       ew32(COMMAND, RestartTx);
                }
                if (status & PCIBusErr170) {
                        printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
@@ -1139,7 +1132,7 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
                        epic_restart(dev);
                }
                /* Clear all error sources. */
-               outl(status & 0x7f18, ioaddr + INTSTAT);
+               ew32(INTSTAT, status & 0x7f18);
        }
 
 out:
@@ -1248,17 +1241,17 @@ static int epic_rx(struct net_device *dev, int budget)
 
 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
 {
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
        int status;
 
-       status = inl(ioaddr + INTSTAT);
+       status = er32(INTSTAT);
 
        if (status == EpicRemoved)
                return;
        if (status & RxOverflow)        /* Missed a Rx frame. */
                dev->stats.rx_errors++;
        if (status & (RxOverflow | RxFull))
-               outw(RxQueued, ioaddr + COMMAND);
+               ew16(COMMAND, RxQueued);
 }
 
 static int epic_poll(struct napi_struct *napi, int budget)
@@ -1266,7 +1259,7 @@ static int epic_poll(struct napi_struct *napi, int budget)
        struct epic_private *ep = container_of(napi, struct epic_private, napi);
        struct net_device *dev = ep->mii.dev;
        int work_done = 0;
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = ep->ioaddr;
 
 rx_action:
 
@@ -1287,7 +1280,7 @@ rx_action:
                more = ep->reschedule_in_poll;
                if (!more) {
                        __napi_complete(napi);
-                       outl(EpicNapiEvent, ioaddr + INTSTAT);
+                       ew32(INTSTAT, EpicNapiEvent);
                        epic_napi_irq_on(dev, ep);
                } else
                        ep->reschedule_in_poll--;
@@ -1303,8 +1296,9 @@ rx_action:
 
 static int epic_close(struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
        struct epic_private *ep = netdev_priv(dev);
+       struct pci_dev *pdev = ep->pci_dev;
+       void __iomem *ioaddr = ep->ioaddr;
        struct sk_buff *skb;
        int i;
 
@@ -1313,13 +1307,13 @@ static int epic_close(struct net_device *dev)
 
        if (debug > 1)
                printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
-                          dev->name, (int)inl(ioaddr + INTSTAT));
+                      dev->name, er32(INTSTAT));
 
        del_timer_sync(&ep->timer);
 
        epic_disable_int(dev, ep);
 
-       free_irq(dev->irq, dev);
+       free_irq(pdev->irq, dev);
 
        epic_pause(dev);
 
@@ -1330,7 +1324,7 @@ static int epic_close(struct net_device *dev)
                ep->rx_ring[i].rxstatus = 0;            /* Not owned by Epic chip. */
                ep->rx_ring[i].buflength = 0;
                if (skb) {
-                       pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+                       pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
                                         ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(skb);
                }
@@ -1341,26 +1335,28 @@ static int epic_close(struct net_device *dev)
                ep->tx_skbuff[i] = NULL;
                if (!skb)
                        continue;
-               pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
-                                skb->len, PCI_DMA_TODEVICE);
+               pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
+                                PCI_DMA_TODEVICE);
                dev_kfree_skb(skb);
        }
 
        /* Green! Leave the chip in low-power mode. */
-       outl(0x0008, ioaddr + GENCTL);
+       ew32(GENCTL, 0x0008);
 
        return 0;
 }
 
 static struct net_device_stats *epic_get_stats(struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
 
        if (netif_running(dev)) {
-               /* Update the error counts. */
-               dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-               dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-               dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+               struct net_device_stats *stats = &dev->stats;
+
+               stats->rx_missed_errors += er8(MPCNT);
+               stats->rx_frame_errors  += er8(ALICNT);
+               stats->rx_crc_errors    += er8(CRCCNT);
        }
 
        return &dev->stats;
@@ -1373,13 +1369,13 @@ static struct net_device_stats *epic_get_stats(struct net_device *dev)
 
 static void set_rx_mode(struct net_device *dev)
 {
-       long ioaddr = dev->base_addr;
        struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
        unsigned char mc_filter[8];              /* Multicast hash filter */
        int i;
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
-               outl(0x002C, ioaddr + RxCtrl);
+               ew32(RxCtrl, 0x002c);
                /* Unconditionally log net taps. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
        } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
@@ -1387,9 +1383,9 @@ static void set_rx_mode(struct net_device *dev)
                   is never enabled. */
                /* Too many to filter perfectly -- accept all multicasts. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
-               outl(0x000C, ioaddr + RxCtrl);
+               ew32(RxCtrl, 0x000c);
        } else if (netdev_mc_empty(dev)) {
-               outl(0x0004, ioaddr + RxCtrl);
+               ew32(RxCtrl, 0x0004);
                return;
        } else {                                        /* Never executed, for now. */
                struct netdev_hw_addr *ha;
@@ -1404,7 +1400,7 @@ static void set_rx_mode(struct net_device *dev)
        /* ToDo: perhaps we need to stop the Tx and Rx process here? */
        if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
                for (i = 0; i < 4; i++)
-                       outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+                       ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
                memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
        }
 }
@@ -1466,22 +1462,26 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
 
 static int ethtool_begin(struct net_device *dev)
 {
-       unsigned long ioaddr = dev->base_addr;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
+
        /* power-up, if interface is down */
-       if (! netif_running(dev)) {
-               outl(0x0200, ioaddr + GENCTL);
-               outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+       if (!netif_running(dev)) {
+               ew32(GENCTL, 0x0200);
+               ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
        }
        return 0;
 }
 
 static void ethtool_complete(struct net_device *dev)
 {
-       unsigned long ioaddr = dev->base_addr;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
+
        /* power-down, if interface is down */
-       if (! netif_running(dev)) {
-               outl(0x0008, ioaddr + GENCTL);
-               outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+       if (!netif_running(dev)) {
+               ew32(GENCTL, 0x0008);
+               ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
        }
 }
 
@@ -1500,14 +1500,14 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct epic_private *np = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = np->ioaddr;
        struct mii_ioctl_data *data = if_mii(rq);
        int rc;
 
        /* power-up, if interface is down */
        if (! netif_running(dev)) {
-               outl(0x0200, ioaddr + GENCTL);
-               outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+               ew32(GENCTL, 0x0200);
+               ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
        }
 
        /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
@@ -1517,14 +1517,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
        /* power-down, if interface is down */
        if (! netif_running(dev)) {
-               outl(0x0008, ioaddr + GENCTL);
-               outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+               ew32(GENCTL, 0x0008);
+               ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
        }
        return rc;
 }
 
 
-static void __devexit epic_remove_one (struct pci_dev *pdev)
+static void __devexit epic_remove_one(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct epic_private *ep = netdev_priv(dev);
@@ -1532,9 +1532,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
        pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
        pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
        unregister_netdev(dev);
-#ifndef USE_IO_OPS
-       iounmap((void*) dev->base_addr);
-#endif
+       pci_iounmap(pdev, ep->ioaddr);
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
@@ -1548,13 +1546,14 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
-       long ioaddr = dev->base_addr;
+       struct epic_private *ep = netdev_priv(dev);
+       void __iomem *ioaddr = ep->ioaddr;
 
        if (!netif_running(dev))
                return 0;
        epic_pause(dev);
        /* Put the chip into low-power mode. */
-       outl(0x0008, ioaddr + GENCTL);
+       ew32(GENCTL, 0x0008);
        /* pci_power_off(pdev, -1); */
        return 0;
 }
index 4a6971027076613f7765a187e2c28eb11df56104..519ed8ef54e089400f43ce76aa36a0c0f294b204 100644 (file)
@@ -2070,6 +2070,7 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
        .get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
        .get_eeprom = smsc911x_ethtool_get_eeprom,
        .set_eeprom = smsc911x_ethtool_set_eeprom,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops smsc911x_netdev_ops = {
index 38386478532b7db8e4983c74f805c300dfa54397..fd33b21f6c96adc579cd254efda06f437ed35428 100644 (file)
@@ -54,7 +54,7 @@ struct smsc9420_ring_info {
 };
 
 struct smsc9420_pdata {
-       void __iomem *base_addr;
+       void __iomem *ioaddr;
        struct pci_dev *pdev;
        struct net_device *dev;
 
@@ -114,13 +114,13 @@ do {      if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
 
 static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
 {
-       return ioread32(pd->base_addr + offset);
+       return ioread32(pd->ioaddr + offset);
 }
 
 static inline void
 smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
 {
-       iowrite32(value, pd->base_addr + offset);
+       iowrite32(value, pd->ioaddr + offset);
 }
 
 static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
@@ -469,6 +469,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
        .set_eeprom = smsc9420_ethtool_set_eeprom,
        .get_regs_len = smsc9420_ethtool_getregslen,
        .get_regs = smsc9420_ethtool_getregs,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 /* Sets the device MAC address to dev_addr */
@@ -659,7 +660,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
        ulong flags;
 
        BUG_ON(!pd);
-       BUG_ON(!pd->base_addr);
+       BUG_ON(!pd->ioaddr);
 
        int_cfg = smsc9420_reg_read(pd, INT_CFG);
 
@@ -720,9 +721,12 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void smsc9420_poll_controller(struct net_device *dev)
 {
-       disable_irq(dev->irq);
+       struct smsc9420_pdata *pd = netdev_priv(dev);
+       const int irq = pd->pdev->irq;
+
+       disable_irq(irq);
        smsc9420_isr(0, dev);
-       enable_irq(dev->irq);
+       enable_irq(irq);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
@@ -759,7 +763,7 @@ static int smsc9420_stop(struct net_device *dev)
        smsc9420_stop_rx(pd);
        smsc9420_free_rx_ring(pd);
 
-       free_irq(dev->irq, pd);
+       free_irq(pd->pdev->irq, pd);
 
        smsc9420_dmac_soft_reset(pd);
 
@@ -1331,15 +1335,12 @@ out:
 
 static int smsc9420_open(struct net_device *dev)
 {
-       struct smsc9420_pdata *pd;
+       struct smsc9420_pdata *pd = netdev_priv(dev);
        u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
+       const int irq = pd->pdev->irq;
        unsigned long flags;
        int result = 0, timeout;
 
-       BUG_ON(!dev);
-       pd = netdev_priv(dev);
-       BUG_ON(!pd);
-
        if (!is_valid_ether_addr(dev->dev_addr)) {
                smsc_warn(IFUP, "dev_addr is not a valid MAC address");
                result = -EADDRNOTAVAIL;
@@ -1358,9 +1359,10 @@ static int smsc9420_open(struct net_device *dev)
        smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
        smsc9420_pci_flush_write(pd);
 
-       if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
-                       DRV_NAME, pd)) {
-               smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq);
+       result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
+                            DRV_NAME, pd);
+       if (result) {
+               smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
                result = -ENODEV;
                goto out_0;
        }
@@ -1395,7 +1397,7 @@ static int smsc9420_open(struct net_device *dev)
        smsc9420_pci_flush_write(pd);
 
        /* test the IRQ connection to the ISR */
-       smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+       smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
        pd->software_irq_signal = false;
 
        spin_lock_irqsave(&pd->int_lock, flags);
@@ -1430,7 +1432,7 @@ static int smsc9420_open(struct net_device *dev)
                goto out_free_irq_1;
        }
 
-       smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq);
+       smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
 
        result = smsc9420_alloc_tx_ring(pd);
        if (result) {
@@ -1490,7 +1492,7 @@ out_free_rx_ring_3:
 out_free_tx_ring_2:
        smsc9420_free_tx_ring(pd);
 out_free_irq_1:
-       free_irq(dev->irq, pd);
+       free_irq(irq, pd);
 out_0:
        return result;
 }
@@ -1519,7 +1521,7 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
                smsc9420_stop_rx(pd);
                smsc9420_free_rx_ring(pd);
 
-               free_irq(dev->irq, pd);
+               free_irq(pd->pdev->irq, pd);
 
                netif_device_detach(dev);
        }
@@ -1552,6 +1554,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
                smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
 
        if (netif_running(dev)) {
+               /* FIXME: gross. It looks like an ancient PM relic. */
                err = smsc9420_open(dev);
                netif_device_attach(dev);
        }
@@ -1625,8 +1628,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* registers are double mapped with 0 offset for LE and 0x200 for BE */
        virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
 
-       dev->base_addr = (ulong)virt_addr;
-
        pd = netdev_priv(dev);
 
        /* pci descriptors are created in the PCI consistent area */
@@ -1646,7 +1647,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        pd->pdev = pdev;
        pd->dev = dev;
-       pd->base_addr = virt_addr;
+       pd->ioaddr = virt_addr;
        pd->msg_enable = smsc_debug;
        pd->rx_csum = true;
 
@@ -1669,7 +1670,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        dev->netdev_ops = &smsc9420_netdev_ops;
        dev->ethtool_ops = &smsc9420_ethtool_ops;
-       dev->irq = pdev->irq;
 
        netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
 
@@ -1727,7 +1727,7 @@ static void __devexit smsc9420_remove(struct pci_dev *pdev)
        pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
                (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
 
-       iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET);
+       iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
index 0319d640f72839cd47ad331718bc2ed2c6972539..9e42b5d32cffd093690b5a1e1f6998f56fe31244 100644 (file)
@@ -97,6 +97,16 @@ struct stmmac_extra_stats {
        unsigned long normal_irq_n;
 };
 
+/* CSR Frequency Access Defines */
+#define CSR_F_35M      35000000
+#define CSR_F_60M      60000000
+#define CSR_F_100M     100000000
+#define CSR_F_150M     150000000
+#define CSR_F_250M     250000000
+#define CSR_F_300M     300000000
+
+#define        MAC_CSR_H_FRQ_MASK      0x20
+
 #define HASH_TABLE_SIZE 64
 #define PAUSE_TIME 0x200
 
@@ -228,7 +238,7 @@ struct stmmac_desc_ops {
        int (*get_rx_owner) (struct dma_desc *p);
        void (*set_rx_owner) (struct dma_desc *p);
        /* Get the receive frame size */
-       int (*get_rx_frame_len) (struct dma_desc *p);
+       int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
        /* Return the reception status looking at the RDES1 */
        int (*rx_status) (void *data, struct stmmac_extra_stats *x,
                          struct dma_desc *p);
@@ -236,7 +246,8 @@ struct stmmac_desc_ops {
 
 struct stmmac_dma_ops {
        /* DMA core initialization */
-       int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+       int (*init) (void __iomem *ioaddr, int pbl, int fb, int burst_len,
+                       u32 dma_tx, u32 dma_rx);
        /* Dump DMA registers */
        void (*dump_regs) (void __iomem *ioaddr);
        /* Set tx/rx threshold in the csr6 register
@@ -261,8 +272,8 @@ struct stmmac_dma_ops {
 struct stmmac_ops {
        /* MAC core initialization */
        void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
-       /* Support checksum offload engine */
-       int  (*rx_coe) (void __iomem *ioaddr);
+       /* Enable and verify that the IPC module is supported */
+       int (*rx_ipc) (void __iomem *ioaddr);
        /* Dump MAC registers */
        void (*dump_regs) (void __iomem *ioaddr);
        /* Handle extra events on specific interrupts hw dependent */
index cfcef0ea0fa5db049120c5a2f097d031b29330a8..54339a78e35891a818a65dfb24897db37ff18b28 100644 (file)
@@ -142,7 +142,7 @@ enum rx_tx_priority_ratio {
 #define DMA_BUS_MODE_RPBL_MASK 0x003e0000      /* Rx-Programmable Burst Len */
 #define DMA_BUS_MODE_RPBL_SHIFT        17
 #define DMA_BUS_MODE_USP       0x00800000
-#define DMA_BUS_MODE_4PBL      0x01000000
+#define DMA_BUS_MODE_PBL       0x01000000
 #define DMA_BUS_MODE_AAL       0x02000000
 
 /* DMA CRS Control and Status Register Mapping */
index b1c48b975945b951c27e1853ad3388283bcf6dbe..e7cbcd99c2cbd1a3f4688e54fc69aa16e1f89c28 100644 (file)
@@ -46,7 +46,7 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
 #endif
 }
 
-static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
 {
        u32 value = readl(ioaddr + GMAC_CONTROL);
 
@@ -211,7 +211,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr)
 
 static const struct stmmac_ops dwmac1000_ops = {
        .core_init = dwmac1000_core_init,
-       .rx_coe = dwmac1000_rx_coe_supported,
+       .rx_ipc = dwmac1000_rx_ipc_enable,
        .dump_regs = dwmac1000_dump_regs,
        .host_irq_status = dwmac1000_irq_status,
        .set_filter = dwmac1000_set_filter,
index 4d5402a1d262976bdede2fed63875000d81e17fb..3675c573156555560af2f33e8fd95d69037c721b 100644 (file)
@@ -30,8 +30,8 @@
 #include "dwmac1000.h"
 #include "dwmac_dma.h"
 
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
-                             u32 dma_rx)
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
+                             int burst_len, u32 dma_tx, u32 dma_rx)
 {
        u32 value = readl(ioaddr + DMA_BUS_MODE);
        int limit;
@@ -48,15 +48,47 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
        if (limit < 0)
                return -EBUSY;
 
-       value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
-           ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
-            (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+       /*
+        * Set the DMA PBL (Programmable Burst Length) mode.
+        * Before stmmac core 3.50 this bit selected 4xPBL mode;
+        * from 3.50 onwards it selects 8xPBL mode.
+        * For core rev < 3.50, when the core is set for 4xPBL mode, the
+        * DMA transfers the data in 4, 8, 16, 32, 64 or 128 beats
+        * depending on the pbl value.
+        * For core rev >= 3.50, when the core is set for 8xPBL mode, the
+        * DMA transfers the data in 8, 16, 32, 64, 128 or 256 beats
+        * depending on the pbl value.
+        */
+       value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+               (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+       /* Set the Fixed burst mode */
+       if (fb)
+               value |= DMA_BUS_MODE_FB;
 
 #ifdef CONFIG_STMMAC_DA
        value |= DMA_BUS_MODE_DA;       /* Rx has priority over tx */
 #endif
        writel(value, ioaddr + DMA_BUS_MODE);
 
+       /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
+        * register for the supported bursts.
+        *
+        * Note: this is applicable only for revision GMACv3.61a. On
+        * older versions this register is reserved and has no
+        * effect.
+        *
+        * Note:
+        *  For Fixed Burst Mode: writing 0xFF to this register, using
+        *  the configuration passed from platform code, ensures that
+        *  all bursts supported by the core are enabled, while the
+        *  unsupported ones simply have no effect.
+        *
+        *  For Non Fixed Burst Mode: provide the maximum burst length.
+        *  Any burst of the provided length or below is allowed to
+        *  be performed. */
+       writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
+
        /* Mask interrupts by writing to CSR7 */
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
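/*
 * [Editor's sketch, assumptions flagged -- not part of the patch] The
 * burst_len written to DMA_AXI_BUS_MODE above comes from the platform
 * DMA configuration. The struct name and the DMA_AXI_BLEN_* flags
 * below are assumed to be the ones this series introduces (only
 * DMA_AXI_BLEN_256 is visible later in this diff); they are shown to
 * illustrate how a board might request fixed-burst mode with a set of
 * allowed AXI burst lengths.
 */
static struct stmmac_dma_cfg example_dma_cfg = {
	.pbl		= 32,		/* programmable burst length */
	.fixed_burst	= 1,		/* sets DMA_BUS_MODE_FB */
	.burst_len	= DMA_AXI_BLEN_256 | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_64,
};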
 
index 138fb8dd1e878f3f5512cd4baf19b1d197a18e2b..efde50ff03f8691b66856ba05289ab5ff399c01e 100644 (file)
@@ -43,11 +43,6 @@ static void dwmac100_core_init(void __iomem *ioaddr)
 #endif
 }
 
-static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
-{
-       return 0;
-}
-
 static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
 {
        pr_info("\t----------------------------------------------\n"
@@ -72,6 +67,11 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
                readl(ioaddr + MAC_VLAN2));
 }
 
+static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
+{
+       return 0;
+}
+
 static void dwmac100_irq_status(void __iomem *ioaddr)
 {
        return;
@@ -160,7 +160,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
 
 static const struct stmmac_ops dwmac100_ops = {
        .core_init = dwmac100_core_init,
-       .rx_coe = dwmac100_rx_coe_supported,
+       .rx_ipc = dwmac100_rx_ipc_enable,
        .dump_regs = dwmac100_dump_mac_regs,
        .host_irq_status = dwmac100_irq_status,
        .set_filter = dwmac100_set_filter,
index bc17fd08b55dc9085a9ea66cefe1f388fd3ce985..92ed2e07609ef82cba0310565845743b8fa0b553 100644 (file)
@@ -32,8 +32,8 @@
 #include "dwmac100.h"
 #include "dwmac_dma.h"
 
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
-                            u32 dma_rx)
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
+                            int burst_len, u32 dma_tx, u32 dma_rx)
 {
        u32 value = readl(ioaddr + DMA_BUS_MODE);
        int limit;
@@ -52,7 +52,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
 
        /* Enable Application Access by writing to DMA CSR0 */
        writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
-              ioaddr + DMA_BUS_MODE);
+                       ioaddr + DMA_BUS_MODE);
 
        /* Mask interrupts by writing to CSR7 */
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
index 437edacd602e2de94ae46dd658fd9fa23f7c1849..6e0360f9cfde739983bf4a70886d8b1909cdea4e 100644 (file)
@@ -32,6 +32,7 @@
 #define DMA_CONTROL            0x00001018      /* Ctrl (Operational Mode) */
 #define DMA_INTR_ENA           0x0000101c      /* Interrupt Enable */
 #define DMA_MISSED_FRAME_CTR   0x00001020      /* Missed Frame Counter */
+#define DMA_AXI_BUS_MODE       0x00001028      /* AXI Bus Mode */
 #define DMA_CUR_TX_BUF_ADDR    0x00001050      /* Current Host Tx Buffer */
 #define DMA_CUR_RX_BUF_ADDR    0x00001054      /* Current Host Rx Buffer */
 #define DMA_HW_FEATURE         0x00001058      /* HW Feature Register */
index ad1b627f8ec2fe83607b6ef489f668825be645df..2fc8ef95f97af22b1d368dd15f40b6ea2e2a5070 100644 (file)
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/stmmac.h>
 #include "common.h"
 #include "descs_com.h"
 
@@ -309,9 +310,17 @@ static void enh_desc_close_tx_desc(struct dma_desc *p)
        p->des01.etx.interrupt = 1;
 }
 
-static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
-       return p->des01.erx.frame_length;
+       /* The type-1 checksum offload engines append the checksum at
+        * the end of the frame, so the two checksum bytes are included
+        * in the reported length.
+        * Adjust the frame length accordingly for type-1 checksum
+        * offload engines. */
+       if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+               return p->des01.erx.frame_length - 2;
+       else
+               return p->des01.erx.frame_length;
 }
 
 const struct stmmac_desc_ops enh_desc_ops = {
index 25953bb45a736acdd517859b98c52850715765b8..68962c549a2d6296815853abaa10cd92e56cfb39 100644 (file)
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/stmmac.h>
 #include "common.h"
 #include "descs_com.h"
 
@@ -201,9 +202,17 @@ static void ndesc_close_tx_desc(struct dma_desc *p)
        p->des01.tx.interrupt = 1;
 }
 
-static int ndesc_get_rx_frame_len(struct dma_desc *p)
+static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
-       return p->des01.rx.frame_length;
+       /* The type-1 checksum offload engines append the checksum at
+        * the end of the frame, so the two checksum bytes are included
+        * in the reported length.
+        * Adjust the frame length accordingly for type-1 checksum
+        * offload engines. */
+       if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+               return p->des01.rx.frame_length - 2;
+       else
+               return p->des01.rx.frame_length;
 }
 
 const struct stmmac_desc_ops ndesc_ops = {
index b4b095fdcf2964f87bdb433b223b940dcb683687..9f2435c53f57d8fefa459ea9f17ffa292f945dd4 100644 (file)
@@ -21,7 +21,9 @@
 *******************************************************************************/
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
-#define DRV_MODULE_VERSION     "Feb_2012"
+#define DRV_MODULE_VERSION     "March_2012"
+
+#include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include "common.h"
@@ -56,8 +58,6 @@ struct stmmac_priv {
 
        struct stmmac_extra_stats xstats;
        struct napi_struct napi;
-
-       int rx_coe;
        int no_csum_insertion;
 
        struct phy_device *phydev;
@@ -81,6 +81,10 @@ struct stmmac_priv {
        struct stmmac_counters mmc;
        struct dma_features dma_cap;
        int hw_cap_support;
+#ifdef CONFIG_HAVE_CLK
+       struct clk *stmmac_clk;
+#endif
+       int clk_csr;
 };
 
 extern int phyaddr;
@@ -99,3 +103,41 @@ int stmmac_dvr_remove(struct net_device *ndev);
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
                                     struct plat_stmmacenet_data *plat_dat,
                                     void __iomem *addr);
+
+#ifdef CONFIG_HAVE_CLK
+static inline int stmmac_clk_enable(struct stmmac_priv *priv)
+{
+       if (priv->stmmac_clk)
+               return clk_enable(priv->stmmac_clk);
+
+       return 0;
+}
+
+static inline void stmmac_clk_disable(struct stmmac_priv *priv)
+{
+       if (priv->stmmac_clk)
+               clk_disable(priv->stmmac_clk);
+}
+static inline int stmmac_clk_get(struct stmmac_priv *priv)
+{
+       priv->stmmac_clk = clk_get(priv->device, NULL);
+
+       if (IS_ERR(priv->stmmac_clk)) {
+               pr_err("%s: ERROR clk_get failed\n", __func__);
+               return PTR_ERR(priv->stmmac_clk);
+       }
+       return 0;
+}
+#else
+static inline int stmmac_clk_enable(struct stmmac_priv *priv)
+{
+       return 0;
+}
+static inline void stmmac_clk_disable(struct stmmac_priv *priv)
+{
+}
+static inline int stmmac_clk_get(struct stmmac_priv *priv)
+{
+       return 0;
+}
+#endif /* CONFIG_HAVE_CLK */
index f98e1511660fe7c2136d345d7efa0bb12e05dec2..ce431846fc6fdb720f739d538a1df34346d35bab 100644 (file)
@@ -481,6 +481,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
        .get_wol = stmmac_get_wol,
        .set_wol = stmmac_set_wol,
        .get_sset_count = stmmac_get_sset_count,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
index 48d56da62f08e94a6b4b26bf594aa8ae022a36bc..a64f0d422e76894bcec6b1cbb0ec09cdf665f5bf 100644 (file)
@@ -163,6 +163,35 @@ static void stmmac_verify_args(void)
                pause = PAUSE_TIME;
 }
 
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+#ifdef CONFIG_HAVE_CLK
+       u32 clk_rate;
+
+       clk_rate = clk_get_rate(priv->stmmac_clk);
+
+       /* The platform-provided default clk_csr is assumed valid
+        * except for the cases handled below. */
+       if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+               if (clk_rate < CSR_F_35M)
+                       priv->clk_csr = STMMAC_CSR_20_35M;
+               else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+                       priv->clk_csr = STMMAC_CSR_35_60M;
+               else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+                       priv->clk_csr = STMMAC_CSR_60_100M;
+               else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+                       priv->clk_csr = STMMAC_CSR_100_150M;
+               else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+                       priv->clk_csr = STMMAC_CSR_150_250M;
+               else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+                       priv->clk_csr = STMMAC_CSR_250_300M;
+       } /* For rates above the IEEE 802.3 specified frequencies we
+          * cannot estimate the proper divider because the frequency
+          * of clk_csr_i is not known, so the default divider is
+          * left unchanged. */
+#endif
+}
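/*
 * [Editor's worked example, illustrative] With a 125 MHz csr clock the
 * ladder above lands in the CSR_F_100M..CSR_F_150M range, so
 * priv->clk_csr becomes STMMAC_CSR_100_150M. The MDIO code later in
 * this patch then shifts that value into the MDC divisor field of the
 * MII address register:
 *
 *	regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
 */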
+
 #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
 static void print_pkt(unsigned char *buf, int len)
 {
@@ -307,7 +336,13 @@ static int stmmac_init_phy(struct net_device *dev)
        priv->speed = 0;
        priv->oldduplex = -1;
 
-       snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
+       if (priv->plat->phy_bus_name)
+               snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+                               priv->plat->phy_bus_name, priv->plat->bus_id);
+       else
+               snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+                               priv->plat->bus_id);
+
        snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                 priv->plat->phy_addr);
        pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
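/*
 * [Editor's worked example, illustrative] Using the defaults from the
 * PCI glue later in this patch (bus_id = 1, phy_addr = 0, no
 * phy_bus_name), the strings built above come out as
 * bus_id = "stmmac-1" and phy_id = "stmmac-1:00", since PHY_ID_FMT in
 * <linux/phy.h> is "%s:%02x"; phy_id is the bus-qualified name handed
 * to the PHY layer when attaching.
 */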
@@ -898,6 +933,8 @@ static int stmmac_open(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
 
+       stmmac_clk_enable(priv);
+
        stmmac_check_ether_addr(priv);
 
        /* MDIO bus Registration */
@@ -905,13 +942,15 @@ static int stmmac_open(struct net_device *dev)
        if (ret < 0) {
                pr_debug("%s: MDIO bus (id: %d) registration failed",
                         __func__, priv->plat->bus_id);
-               return ret;
+               goto open_clk_dis;
        }
 
 #ifdef CONFIG_STMMAC_TIMER
        priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
-       if (unlikely(priv->tm == NULL))
-               return -ENOMEM;
+       if (unlikely(priv->tm == NULL)) {
+               ret = -ENOMEM;
+               goto open_clk_dis;
+       }
 
        priv->tm->freq = tmrate;
 
@@ -938,7 +977,9 @@ static int stmmac_open(struct net_device *dev)
        init_dma_desc_rings(dev);
 
        /* DMA initialization and SW reset */
-       ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+       ret = priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg->pbl,
+                                 priv->plat->dma_cfg->fixed_burst,
+                                 priv->plat->dma_cfg->burst_len,
                                  priv->dma_tx_phy, priv->dma_rx_phy);
        if (ret < 0) {
                pr_err("%s: DMA initialization failed\n", __func__);
@@ -1026,6 +1067,8 @@ open_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
 
+open_clk_dis:
+       stmmac_clk_disable(priv);
        return ret;
 }
 
@@ -1078,6 +1121,7 @@ static int stmmac_release(struct net_device *dev)
        stmmac_exit_fs();
 #endif
        stmmac_mdio_unregister(dev);
+       stmmac_clk_disable(priv);
 
        return 0;
 }
@@ -1276,7 +1320,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                        struct sk_buff *skb;
                        int frame_len;
 
-                       frame_len = priv->hw->desc->get_rx_frame_len(p);
+                       frame_len = priv->hw->desc->get_rx_frame_len(p,
+                                       priv->plat->rx_coe);
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
                         * Type frames (LLC/LLC-SNAP) */
                        if (unlikely(status != llc_snap))
@@ -1312,7 +1357,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 #endif
                        skb->protocol = eth_type_trans(skb, priv->dev);
 
-                       if (unlikely(!priv->rx_coe)) {
+                       if (unlikely(!priv->plat->rx_coe)) {
                                /* No RX COE for old mac10/100 devices */
                                skb_checksum_none_assert(skb);
                                netif_receive_skb(skb);
@@ -1459,8 +1504,10 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (!priv->rx_coe)
+       if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
                features &= ~NETIF_F_RXCSUM;
+       else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
+               features &= ~NETIF_F_IPV6_CSUM;
        if (!priv->plat->tx_coe)
                features &= ~NETIF_F_ALL_CSUM;
 
@@ -1765,17 +1812,32 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
                 * register (if supported).
                 */
                priv->plat->enh_desc = priv->dma_cap.enh_desc;
-               priv->plat->tx_coe = priv->dma_cap.tx_coe;
                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+
+               priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+               if (priv->dma_cap.rx_coe_type2)
+                       priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+               else if (priv->dma_cap.rx_coe_type1)
+                       priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
        } else
                pr_info(" No HW DMA feature register supported");
 
        /* Select the enhanced/normal descriptor structures */
        stmmac_selec_desc_mode(priv);
 
-       priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
-       if (priv->rx_coe)
-               pr_info(" RX Checksum Offload Engine supported\n");
+       /* Enable the IPC (Checksum Offload) and check if the feature has been
+        * enabled during the core configuration. */
+       ret = priv->hw->mac->rx_ipc(priv->ioaddr);
+       if (!ret) {
+               pr_warning(" RX IPC Checksum Offload not configured.\n");
+               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+       }
+
+       if (priv->plat->rx_coe)
+               pr_info(" RX Checksum Offload Engine supported (type %d)\n",
+                       priv->plat->rx_coe);
        if (priv->plat->tx_coe)
                pr_info(" TX Checksum insertion supported\n");
 
@@ -1856,6 +1918,20 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
                goto error;
        }
 
+       if (stmmac_clk_get(priv))
+               goto error;
+
+       /* If a specific clk_csr value is passed from the platform
+        * this means that the CSR Clock Range selection cannot be
+        * changed at run-time and is fixed. Otherwise the driver will
+        * try to set the MDC clock dynamically according to the
+        * actual csr clock input.
+        */
+       if (!priv->plat->clk_csr)
+               stmmac_clk_csr_set(priv);
+       else
+               priv->clk_csr = priv->plat->clk_csr;
+
        return priv;
 
 error:
@@ -1925,9 +2001,11 @@ int stmmac_suspend(struct net_device *ndev)
        /* Enable Power down mode by programming the PMT regs */
        if (device_may_wakeup(priv->device))
                priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
-       else
+       else {
                stmmac_set_mac(priv->ioaddr, false);
-
+               /* Disable the clock when PMT wake-up is not in use */
+               stmmac_clk_disable(priv);
+       }
        spin_unlock(&priv->lock);
        return 0;
 }
@@ -1948,6 +2026,9 @@ int stmmac_resume(struct net_device *ndev)
         * from other devices (e.g. serial console). */
        if (device_may_wakeup(priv->device))
                priv->hw->mac->pmt(priv->ioaddr, 0);
+       else
+               /* enable the clock previously disabled */
+               stmmac_clk_enable(priv);
 
        netif_device_attach(ndev);
 
index 73195329aa464b8b5b9a7c4cadd2aaaa36f134ad..ade108232048aca7771e2a3d0ea9d6d5a15f7a45 100644 (file)
 #define MII_BUSY 0x00000001
 #define MII_WRITE 0x00000002
 
+static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
+{
+       unsigned long curr;
+       unsigned long finish = jiffies + 3 * HZ;
+
+       do {
+               curr = jiffies;
+               if (readl(ioaddr + mii_addr) & MII_BUSY)
+                       cpu_relax();
+               else
+                       return 0;
+       } while (!time_after_eq(curr, finish));
+
+       return -EBUSY;
+}
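/*
 * [Editor's note -- not part of the patch] This helper is a plain
 * jiffies-based poll with a roughly 3 second timeout. On kernels that
 * provide <linux/iopoll.h>, a similar wait (returning -ETIMEDOUT
 * rather than -EBUSY on timeout) could be written as:
 *
 *	u32 v;
 *
 *	return readl_poll_timeout(ioaddr + mii_addr, v,
 *				  !(v & MII_BUSY), 10, 3 * USEC_PER_SEC);
 */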
+
 /**
  * stmmac_mdio_read
  * @bus: points to the mii_bus structure
@@ -54,11 +70,15 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
        int data;
        u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
                        ((phyreg << 6) & (0x000007C0)));
-       regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+       regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
+
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
 
-       do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
        writel(regValue, priv->ioaddr + mii_address);
-       do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
 
        /* Read the data from the MII data register */
        data = (int)readl(priv->ioaddr + mii_data);
@@ -86,20 +106,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
            (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
            | MII_WRITE;
 
-       value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
-
+       value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
 
        /* Wait until any existing MII operation is complete */
-       do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
 
        /* Set the MII address register to write */
        writel(phydata, priv->ioaddr + mii_data);
        writel(value, priv->ioaddr + mii_address);
 
        /* Wait until any existing MII operation is complete */
-       do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
-
-       return 0;
+       return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
 }
 
 /**
index da66ed7c3c5d7de297ded16490fdc3be302fd75a..65e0f98520d64fe65c28ddda18e94a0b2960f455 100644 (file)
@@ -35,7 +35,8 @@ static void stmmac_default_data(void)
        plat_dat.bus_id = 1;
        plat_dat.phy_addr = 0;
        plat_dat.interface = PHY_INTERFACE_MODE_GMII;
-       plat_dat.pbl = 32;
+       plat_dat.dma_cfg->pbl = 32;
+       plat_dat.dma_cfg->burst_len = DMA_AXI_BLEN_256;
        plat_dat.clk_csr = 2;   /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
        plat_dat.has_gmac = 1;
        plat_dat.force_sf_dma_mode = 1;
index 116529a366b28d319983dbee62d930554580901a..12bd221561e575203bb9d2457f9e0e98e26961b9 100644 (file)
@@ -50,7 +50,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
         * once needed on other platforms.
         */
        if (of_device_is_compatible(np, "st,spear600-gmac")) {
-               plat->pbl = 8;
+               plat->dma_cfg->pbl = 8;
                plat->has_gmac = 1;
                plat->pmt = 1;
        }
index 558409ff40582fa9f5cd1ae91248f9d214c51d3e..dfd4b1d13a512245372c0ecc11bc3b7e7ad5e837 100644 (file)
@@ -2898,7 +2898,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
        }
 
        gp->pdev = pdev;
-       dev->base_addr = (long) pdev;
        gp->dev = dev;
 
        gp->msg_enable = DEFAULT_MSG;
@@ -2972,7 +2971,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
        netif_napi_add(dev, &gp->napi, gem_poll, 64);
        dev->ethtool_ops = &gem_ethtool_ops;
        dev->watchdog_timeo = 5 * HZ;
-       dev->irq = pdev->irq;
        dev->dma = 0;
 
        /* Set that now, in case PM kicks in now */
index b95e7e681b38cfc59e5566fb6c53cce2174c9bd9..dfc00c4683e5ab68622032940bd25b289f59add3 100644 (file)
@@ -2182,11 +2182,12 @@ static int happy_meal_open(struct net_device *dev)
         * into a single source which we register handling at probe time.
         */
        if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
-               if (request_irq(dev->irq, happy_meal_interrupt,
-                               IRQF_SHARED, dev->name, (void *)dev)) {
+               res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
+                                 dev->name, dev);
+               if (res) {
                        HMD(("EAGAIN\n"));
                        printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
-                              dev->irq);
+                              hp->irq);
 
                        return -EAGAIN;
                }
@@ -2199,7 +2200,7 @@ static int happy_meal_open(struct net_device *dev)
        spin_unlock_irq(&hp->happy_lock);
 
        if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
-               free_irq(dev->irq, dev);
+               free_irq(hp->irq, dev);
        return res;
 }
 
@@ -2221,7 +2222,7 @@ static int happy_meal_close(struct net_device *dev)
         * time and never unregister.
         */
        if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
-               free_irq(dev->irq, dev);
+               free_irq(hp->irq, dev);
 
        return 0;
 }
@@ -2777,7 +2778,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
        dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 
-       dev->irq = op->archdata.irqs[0];
+       hp->irq = op->archdata.irqs[0];
 
 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
        /* Hook up SBUS register/descriptor accessors. */
@@ -2981,8 +2982,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
        if (hme_version_printed++ == 0)
                printk(KERN_INFO "%s", version);
 
-       dev->base_addr = (long) pdev;
-
        hp = netdev_priv(dev);
 
        hp->happy_dev = pdev;
@@ -3087,12 +3086,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
 
        init_timer(&hp->happy_timer);
 
+       hp->irq = pdev->irq;
        hp->dev = dev;
        dev->netdev_ops = &hme_netdev_ops;
        dev->watchdog_timeo = 5*HZ;
        dev->ethtool_ops = &hme_ethtool_ops;
-       dev->irq = pdev->irq;
-       dev->dma = 0;
 
        /* Happy Meal can do it all... */
        dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
index 64f278360d892e4415e767f77fdc11e5e92f65dc..f4307654e4ae623c519a43b1202d3bec1949ffe2 100644 (file)
@@ -432,6 +432,7 @@ struct happy_meal {
 
        dma_addr_t                hblock_dvma;    /* DVMA visible address happy block  */
        unsigned int              happy_flags;    /* Driver state flags                */
+       int                       irq;
        enum happy_transceiver    tcvr_type;      /* Kind of transceiver in use        */
        unsigned int              happy_bursts;   /* Get your mind out of the gutter   */
        unsigned int              paddr;          /* PHY address for transceiver       */
index ad973ffc9ff363acb4ecd923bcd3d93a3b78dc18..dc242e28dbb52d2464948e78f2431d075ae0ba1d 100644 (file)
@@ -1988,10 +1988,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                /* these fields are used for info purposes only
                 * so we can have them same for all ports of the board */
                ndev->if_port = port;
-               ndev->base_addr = pciaddr;
-               ndev->mem_start = pciaddr;
-               ndev->mem_end = pciaddr + regionSize;
-               ndev->irq = pdev->irq;
                ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
                    | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                    NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
index 174a3348f6762d67a661801342ea6cbe95a7810f..8aa33326bec3507d07ed0d3c9b46dab081023996 100644 (file)
@@ -627,6 +627,7 @@ static const struct ethtool_ops ethtool_ops = {
        .get_link = ethtool_op_get_link,
        .get_coalesce = emac_get_coalesce,
        .set_coalesce =  emac_set_coalesce,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 /**
index fcfa01f7ceb6983f649d14cf125d8e64d7319c86..0459c096629f0b71df679cdf2fa2bd28449f0d26 100644 (file)
@@ -689,9 +689,12 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void rhine_poll(struct net_device *dev)
 {
-       disable_irq(dev->irq);
-       rhine_interrupt(dev->irq, (void *)dev);
-       enable_irq(dev->irq);
+       struct rhine_private *rp = netdev_priv(dev);
+       const int irq = rp->pdev->irq;
+
+       disable_irq(irq);
+       rhine_interrupt(irq, dev);
+       enable_irq(irq);
 }
 #endif
 
@@ -972,7 +975,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
        }
 #endif /* USE_MMIO */
 
-       dev->base_addr = (unsigned long)ioaddr;
        rp->base = ioaddr;
 
        /* Get chip registers into a sane state */
@@ -995,8 +997,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
        if (!phy_id)
                phy_id = ioread8(ioaddr + 0x6C);
 
-       dev->irq = pdev->irq;
-
        spin_lock_init(&rp->lock);
        mutex_init(&rp->task_lock);
        INIT_WORK(&rp->reset_task, rhine_reset_task);
index 8a5d7c100a5e30552bbd28baef3bdbd2da9e8cfb..ea3e0a21ba74e109521a1dac3ef146821e12381d 100644 (file)
@@ -2488,8 +2488,8 @@ static int velocity_close(struct net_device *dev)
 
        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
                velocity_get_ip(vptr);
-       if (dev->irq != 0)
-               free_irq(dev->irq, dev);
+
+       free_irq(vptr->pdev->irq, dev);
 
        velocity_free_rings(vptr);
 
@@ -2755,8 +2755,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
        if (ret < 0)
                goto err_free_dev;
 
-       dev->irq = pdev->irq;
-
        ret = velocity_get_pci_info(vptr, pdev);
        if (ret < 0) {
                /* error message already printed */
@@ -2779,8 +2777,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 
        mac_wol_reset(regs);
 
-       dev->base_addr = vptr->ioaddr;
-
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(&regs->PAR[i]);
 
@@ -2806,7 +2802,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 
        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
 
-       dev->irq = pdev->irq;
        dev->netdev_ops = &velocity_netdev_ops;
        dev->ethtool_ops = &velocity_ethtool_ops;
        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
new file mode 100644 (file)
index 0000000..cb18043
--- /dev/null
@@ -0,0 +1,73 @@
+#
+# WIZnet devices configuration
+#
+
+config NET_VENDOR_WIZNET
+       bool "WIZnet devices"
+       default y
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y
+         and read the Ethernet-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about WIZnet devices. If you say Y, you will be asked
+         for your specific card in the following questions.
+
+if NET_VENDOR_WIZNET
+
+config WIZNET_W5100
+       tristate "WIZnet W5100 Ethernet support"
+       depends on HAS_IOMEM
+       ---help---
+         Support for WIZnet W5100 chips.
+
+         W5100 is a single chip with an integrated 10/100 Ethernet MAC,
+         PHY and hardware TCP/IP stack, but this driver is limited to
+         the MAC and PHY functions only; the on-chip TCP/IP stack is unused.
+
+         To compile this driver as a module, choose M here: the module
+         will be called w5100.
+
+config WIZNET_W5300
+       tristate "WIZnet W5300 Ethernet support"
+       depends on HAS_IOMEM
+       ---help---
+         Support for WIZnet W5300 chips.
+
+         W5300 is a single chip with an integrated 10/100 Ethernet MAC,
+         PHY and hardware TCP/IP stack, but this driver is limited to
+         the MAC and PHY functions only; the on-chip TCP/IP stack is unused.
+
+         To compile this driver as a module, choose M here: the module
+         will be called w5300.
+
+choice
+       prompt "WIZnet interface mode"
+       depends on WIZNET_W5100 || WIZNET_W5300
+       default WIZNET_BUS_ANY
+
+config WIZNET_BUS_DIRECT
+       bool "Direct address bus mode"
+       ---help---
+         In direct address mode the host system can directly access all
+         registers once they are mapped into Memory-Mapped I/O space.
+
+config WIZNET_BUS_INDIRECT
+       bool "Indirect address bus mode"
+       ---help---
+         In indirect address mode the host system accesses registers
+         indirectly through the Indirect Mode Address Register and the
+         Indirect Mode Data Register, which are directly mapped into
+         Memory-Mapped I/O space.
+
+config WIZNET_BUS_ANY
+       bool "Select interface mode in runtime"
+       ---help---
+         If the interface mode is not known at compile time, it can be
+         selected at runtime from the board/platform resources configuration.
+
+         Performance may decrease compared to an explicitly selected bus mode.
+endchoice
+
+endif # NET_VENDOR_WIZNET
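The WIZNET_BUS_ANY help text above relies on the board resources to pick the bus mode at probe time. As a rough, hypothetical illustration (not part of this commit), a board file might register the chip as a platform device like the sketch below; the base address, IRQ number and array names are placeholders, and W5100_BUS_DIRECT_SIZE is assumed to be provided by <linux/platform_data/wiznet.h>, the header the new drivers include. Mapping a window of at least that size makes the probe code choose direct access, while a smaller window selects indirect mode.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/wiznet.h>

static struct resource board_w5100_resources[] = {
        {
                .start = 0x30000000,                             /* placeholder chip-select base */
                .end   = 0x30000000 + W5100_BUS_DIRECT_SIZE - 1, /* full window => direct mode */
                .flags = IORESOURCE_MEM,
        },
        {
                .start = 42,                                     /* placeholder interrupt line */
                .flags = IORESOURCE_IRQ,
        },
};

static struct wiznet_platform_data board_w5100_data = {
        .link_gpio = -EINVAL,            /* no link-detect GPIO on this hypothetical board */
};

static struct platform_device board_w5100_device = {
        .name           = "w5100",
        .id             = -1,
        .resource       = board_w5100_resources,
        .num_resources  = ARRAY_SIZE(board_w5100_resources),
        .dev            = {
                .platform_data = &board_w5100_data,
        },
};

Calling platform_device_register(&board_w5100_device) from the board init code would then let the w5100 driver bind by name and read its window, IRQ and link GPIO from these resources.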
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
new file mode 100644 (file)
index 0000000..c614535
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
new file mode 100644 (file)
index 0000000..18c8098
--- /dev/null
@@ -0,0 +1,808 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kconfig.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wiznet.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#define DRV_NAME       "w5100"
+#define DRV_VERSION    "2012-04-04"
+
+MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
+MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
+MODULE_ALIAS("platform:"DRV_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * Registers
+ */
+#define W5100_COMMON_REGS      0x0000
+#define W5100_MR               0x0000 /* Mode Register */
+#define   MR_RST                 0x80 /* S/W reset */
+#define   MR_PB                          0x10 /* Ping block */
+#define   MR_AI                          0x02 /* Address Auto-Increment */
+#define   MR_IND                 0x01 /* Indirect mode */
+#define W5100_SHAR             0x0009 /* Source MAC address */
+#define W5100_IR               0x0015 /* Interrupt Register */
+#define W5100_IMR              0x0016 /* Interrupt Mask Register */
+#define   IR_S0                          0x01 /* S0 interrupt */
+#define W5100_RTR              0x0017 /* Retry Time-value Register */
+#define   RTR_DEFAULT            2000 /* =0x07d0 (2000) */
+#define W5100_RMSR             0x001a /* Receive Memory Size */
+#define W5100_TMSR             0x001b /* Transmit Memory Size */
+#define W5100_COMMON_REGS_LEN  0x0040
+
+#define W5100_S0_REGS          0x0400
+#define W5100_S0_MR            0x0400 /* S0 Mode Register */
+#define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscuous) */
+#define   S0_MR_MACRAW_MF        0x44 /* MAC RAW mode (filtered) */
+#define W5100_S0_CR            0x0401 /* S0 Command Register */
+#define   S0_CR_OPEN             0x01 /* OPEN command */
+#define   S0_CR_CLOSE            0x10 /* CLOSE command */
+#define   S0_CR_SEND             0x20 /* SEND command */
+#define   S0_CR_RECV             0x40 /* RECV command */
+#define W5100_S0_IR            0x0402 /* S0 Interrupt Register */
+#define   S0_IR_SENDOK           0x10 /* complete sending */
+#define   S0_IR_RECV             0x04 /* receiving data */
+#define W5100_S0_SR            0x0403 /* S0 Status Register */
+#define   S0_SR_MACRAW           0x42 /* mac raw mode */
+#define W5100_S0_TX_FSR                0x0420 /* S0 Transmit free memory size */
+#define W5100_S0_TX_RD         0x0422 /* S0 Transmit memory read pointer */
+#define W5100_S0_TX_WR         0x0424 /* S0 Transmit memory write pointer */
+#define W5100_S0_RX_RSR                0x0426 /* S0 Received data size */
+#define W5100_S0_RX_RD         0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_REGS_LEN      0x0040
+
+#define W5100_TX_MEM_START     0x4000
+#define W5100_TX_MEM_END       0x5fff
+#define W5100_TX_MEM_MASK      0x1fff
+#define W5100_RX_MEM_START     0x6000
+#define W5100_RX_MEM_END       0x7fff
+#define W5100_RX_MEM_MASK      0x1fff
+
+/*
+ * Device driver private data structure
+ */
+struct w5100_priv {
+       void __iomem *base;
+       spinlock_t reg_lock;
+       bool indirect;
+       u8   (*read)(struct w5100_priv *priv, u16 addr);
+       void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
+       u16  (*read16)(struct w5100_priv *priv, u16 addr);
+       void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
+       void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+       void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+       int irq;
+       int link_irq;
+       int link_gpio;
+
+       struct napi_struct napi;
+       struct net_device *ndev;
+       bool promisc;
+       u32 msg_enable;
+};
+
+/************************************************************************
+ *
+ *  Lowlevel I/O functions
+ *
+ ***********************************************************************/
+
+/*
+ * In direct address mode the host system can directly access W5100 registers
+ * once they are mapped into Memory-Mapped I/O space.
+ *
+ * A 0x8000-byte window is required in the memory space.
+ */
+static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+{
+       return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static inline void w5100_write_direct(struct w5100_priv *priv,
+                                     u16 addr, u8 data)
+{
+       iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+{
+       u16 data;
+       data  = w5100_read_direct(priv, addr) << 8;
+       data |= w5100_read_direct(priv, addr + 1);
+       return data;
+}
+
+static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+{
+       w5100_write_direct(priv, addr, data >> 8);
+       w5100_write_direct(priv, addr + 1, data);
+}
+
+static void w5100_readbuf_direct(struct w5100_priv *priv,
+                                u16 offset, u8 *buf, int len)
+{
+       u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+       int i;
+
+       for (i = 0; i < len; i++, addr++) {
+               if (unlikely(addr > W5100_RX_MEM_END))
+                       addr = W5100_RX_MEM_START;
+               *buf++ = w5100_read_direct(priv, addr);
+       }
+}
+
+static void w5100_writebuf_direct(struct w5100_priv *priv,
+                                 u16 offset, u8 *buf, int len)
+{
+       u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+       int i;
+
+       for (i = 0; i < len; i++, addr++) {
+               if (unlikely(addr > W5100_TX_MEM_END))
+                       addr = W5100_TX_MEM_START;
+               w5100_write_direct(priv, addr, *buf++);
+       }
+}
+
+/*
+ * In indirect address mode the host system accesses registers indirectly
+ * through the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
+ * Data Register (IDM_DR), which are directly mapped into Memory-Mapped I/O
+ * space.  The Mode Register (MR) remains directly accessible.
+ *
+ * Only a 0x04-byte window is required in the memory space.
+ */
+#define W5100_IDM_AR           0x01   /* Indirect Mode Address Register */
+#define W5100_IDM_DR           0x03   /* Indirect Mode Data Register */
+
+static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+{
+       unsigned long flags;
+       u8 data;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+       mmiowb();
+       data = w5100_read_direct(priv, W5100_IDM_DR);
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+       return data;
+}
+
+static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+       mmiowb();
+       w5100_write_direct(priv, W5100_IDM_DR, data);
+       mmiowb();
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+{
+       unsigned long flags;
+       u16 data;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+       mmiowb();
+       data  = w5100_read_direct(priv, W5100_IDM_DR) << 8;
+       data |= w5100_read_direct(priv, W5100_IDM_DR);
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+       return data;
+}
+
+static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+       mmiowb();
+       w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
+       w5100_write_direct(priv, W5100_IDM_DR, data);
+       mmiowb();
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static void w5100_readbuf_indirect(struct w5100_priv *priv,
+                                  u16 offset, u8 *buf, int len)
+{
+       u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+       mmiowb();
+
+       for (i = 0; i < len; i++, addr++) {
+               if (unlikely(addr > W5100_RX_MEM_END)) {
+                       addr = W5100_RX_MEM_START;
+                       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+                       mmiowb();
+               }
+               *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
+       }
+       mmiowb();
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static void w5100_writebuf_indirect(struct w5100_priv *priv,
+                                   u16 offset, u8 *buf, int len)
+{
+       u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+       mmiowb();
+
+       for (i = 0; i < len; i++, addr++) {
+               if (unlikely(addr > W5100_TX_MEM_END)) {
+                       addr = W5100_TX_MEM_START;
+                       w5100_write16_direct(priv, W5100_IDM_AR, addr);
+                       mmiowb();
+               }
+               w5100_write_direct(priv, W5100_IDM_DR, *buf++);
+       }
+       mmiowb();
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+#if defined(CONFIG_WIZNET_BUS_DIRECT)
+#define w5100_read     w5100_read_direct
+#define w5100_write    w5100_write_direct
+#define w5100_read16   w5100_read16_direct
+#define w5100_write16  w5100_write16_direct
+#define w5100_readbuf  w5100_readbuf_direct
+#define w5100_writebuf w5100_writebuf_direct
+
+#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
+#define w5100_read     w5100_read_indirect
+#define w5100_write    w5100_write_indirect
+#define w5100_read16   w5100_read16_indirect
+#define w5100_write16  w5100_write16_indirect
+#define w5100_readbuf  w5100_readbuf_indirect
+#define w5100_writebuf w5100_writebuf_indirect
+
+#else /* CONFIG_WIZNET_BUS_ANY */
+#define w5100_read     priv->read
+#define w5100_write    priv->write
+#define w5100_read16   priv->read16
+#define w5100_write16  priv->write16
+#define w5100_readbuf  priv->readbuf
+#define w5100_writebuf priv->writebuf
+#endif
+
+static int w5100_command(struct w5100_priv *priv, u16 cmd)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+       w5100_write(priv, W5100_S0_CR, cmd);
+       mmiowb();
+
+       while (w5100_read(priv, W5100_S0_CR) != 0) {
+               if (time_after(jiffies, timeout))
+                       return -EIO;
+               cpu_relax();
+       }
+
+       return 0;
+}
+
+static void w5100_write_macaddr(struct w5100_priv *priv)
+{
+       struct net_device *ndev = priv->ndev;
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
+       mmiowb();
+}
+
+static void w5100_hw_reset(struct w5100_priv *priv)
+{
+       w5100_write_direct(priv, W5100_MR, MR_RST);
+       mmiowb();
+       mdelay(5);
+       w5100_write_direct(priv, W5100_MR, priv->indirect ?
+                                 MR_PB | MR_AI | MR_IND :
+                                 MR_PB);
+       mmiowb();
+       w5100_write(priv, W5100_IMR, 0);
+       w5100_write_macaddr(priv);
+
+       /* Configure 16K of internal memory
+        * as 8K RX buffer and 8K TX buffer
+        */
+       w5100_write(priv, W5100_RMSR, 0x03);
+       w5100_write(priv, W5100_TMSR, 0x03);
+       mmiowb();
+}
+
+static void w5100_hw_start(struct w5100_priv *priv)
+{
+       w5100_write(priv, W5100_S0_MR, priv->promisc ?
+                         S0_MR_MACRAW : S0_MR_MACRAW_MF);
+       mmiowb();
+       w5100_command(priv, S0_CR_OPEN);
+       w5100_write(priv, W5100_IMR, IR_S0);
+       mmiowb();
+}
+
+static void w5100_hw_close(struct w5100_priv *priv)
+{
+       w5100_write(priv, W5100_IMR, 0);
+       mmiowb();
+       w5100_command(priv, S0_CR_CLOSE);
+}
+
+/***********************************************************************
+ *
+ *   Device driver functions / callbacks
+ *
+ ***********************************************************************/
+
+static void w5100_get_drvinfo(struct net_device *ndev,
+                             struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+               sizeof(info->bus_info));
+}
+
+static u32 w5100_get_link(struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       if (gpio_is_valid(priv->link_gpio))
+               return !!gpio_get_value(priv->link_gpio);
+
+       return 1;
+}
+
+static u32 w5100_get_msglevel(struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       return priv->msg_enable;
+}
+
+static void w5100_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       priv->msg_enable = value;
+}
+
+static int w5100_get_regs_len(struct net_device *ndev)
+{
+       return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
+}
+
+static void w5100_get_regs(struct net_device *ndev,
+                          struct ethtool_regs *regs, void *_buf)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+       u8 *buf = _buf;
+       u16 i;
+
+       regs->version = 1;
+       for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
+               *buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
+       for (i = 0; i < W5100_S0_REGS_LEN; i++)
+               *buf++ = w5100_read(priv, W5100_S0_REGS + i);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       w5100_hw_reset(priv);
+       w5100_hw_start(priv);
+       ndev->stats.tx_errors++;
+       ndev->trans_start = jiffies;
+       netif_wake_queue(ndev);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+       u16 offset;
+
+       netif_stop_queue(ndev);
+
+       offset = w5100_read16(priv, W5100_S0_TX_WR);
+       w5100_writebuf(priv, offset, skb->data, skb->len);
+       w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
+       mmiowb();
+       ndev->stats.tx_bytes += skb->len;
+       ndev->stats.tx_packets++;
+       dev_kfree_skb(skb);
+
+       w5100_command(priv, S0_CR_SEND);
+
+       return NETDEV_TX_OK;
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+       struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+       struct net_device *ndev = priv->ndev;
+       struct sk_buff *skb;
+       int rx_count;
+       u16 rx_len;
+       u16 offset;
+       u8 header[2];
+
+       for (rx_count = 0; rx_count < budget; rx_count++) {
+               u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
+               if (rx_buf_len == 0)
+                       break;
+
+               offset = w5100_read16(priv, W5100_S0_RX_RD);
+               w5100_readbuf(priv, offset, header, 2);
+               rx_len = get_unaligned_be16(header) - 2;
+
+               skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+               if (unlikely(!skb)) {
+                       w5100_write16(priv, W5100_S0_RX_RD,
+                                           offset + rx_buf_len);
+                       w5100_command(priv, S0_CR_RECV);
+                       ndev->stats.rx_dropped++;
+                       return -ENOMEM;
+               }
+
+               skb_put(skb, rx_len);
+               w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+               w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
+               mmiowb();
+               w5100_command(priv, S0_CR_RECV);
+               skb->protocol = eth_type_trans(skb, ndev);
+
+               netif_receive_skb(skb);
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += rx_len;
+       }
+
+       if (rx_count < budget) {
+               w5100_write(priv, W5100_IMR, IR_S0);
+               mmiowb();
+               napi_complete(napi);
+       }
+
+       return rx_count;
+}
+
+static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
+{
+       struct net_device *ndev = ndev_instance;
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       int ir = w5100_read(priv, W5100_S0_IR);
+       if (!ir)
+               return IRQ_NONE;
+       w5100_write(priv, W5100_S0_IR, ir);
+       mmiowb();
+
+       if (ir & S0_IR_SENDOK) {
+               netif_dbg(priv, tx_done, ndev, "tx done\n");
+               netif_wake_queue(ndev);
+       }
+
+       if (ir & S0_IR_RECV) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       w5100_write(priv, W5100_IMR, 0);
+                       mmiowb();
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
+{
+       struct net_device *ndev = ndev_instance;
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               if (gpio_get_value(priv->link_gpio) != 0) {
+                       netif_info(priv, link, ndev, "link is up\n");
+                       netif_carrier_on(ndev);
+               } else {
+                       netif_info(priv, link, ndev, "link is down\n");
+                       netif_carrier_off(ndev);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void w5100_set_rx_mode(struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+       bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
+
+       if (priv->promisc != set_promisc) {
+               priv->promisc = set_promisc;
+               w5100_hw_start(priv);
+       }
+}
+
+static int w5100_set_macaddr(struct net_device *ndev, void *addr)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+       struct sockaddr *sock_addr = addr;
+
+       if (!is_valid_ether_addr(sock_addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+       ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+       w5100_write_macaddr(priv);
+       return 0;
+}
+
+static int w5100_open(struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       netif_info(priv, ifup, ndev, "enabling\n");
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               return -EINVAL;
+       w5100_hw_start(priv);
+       napi_enable(&priv->napi);
+       netif_start_queue(ndev);
+       if (!gpio_is_valid(priv->link_gpio) ||
+           gpio_get_value(priv->link_gpio) != 0)
+               netif_carrier_on(ndev);
+       return 0;
+}
+
+static int w5100_stop(struct net_device *ndev)
+{
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       netif_info(priv, ifdown, ndev, "shutting down\n");
+       w5100_hw_close(priv);
+       netif_carrier_off(ndev);
+       netif_stop_queue(ndev);
+       napi_disable(&priv->napi);
+       return 0;
+}
+
+static const struct ethtool_ops w5100_ethtool_ops = {
+       .get_drvinfo            = w5100_get_drvinfo,
+       .get_msglevel           = w5100_get_msglevel,
+       .set_msglevel           = w5100_set_msglevel,
+       .get_link               = w5100_get_link,
+       .get_regs_len           = w5100_get_regs_len,
+       .get_regs               = w5100_get_regs,
+};
+
+static const struct net_device_ops w5100_netdev_ops = {
+       .ndo_open               = w5100_open,
+       .ndo_stop               = w5100_stop,
+       .ndo_start_xmit         = w5100_start_tx,
+       .ndo_tx_timeout         = w5100_tx_timeout,
+       .ndo_set_rx_mode        = w5100_set_rx_mode,
+       .ndo_set_mac_address    = w5100_set_macaddr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+};
+
+static int __devinit w5100_hw_probe(struct platform_device *pdev)
+{
+       struct wiznet_platform_data *data = pdev->dev.platform_data;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5100_priv *priv = netdev_priv(ndev);
+       const char *name = netdev_name(ndev);
+       struct resource *mem;
+       int mem_size;
+       int irq;
+       int ret;
+
+       if (data && is_valid_ether_addr(data->mac_addr)) {
+               memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+       } else {
+               random_ether_addr(ndev->dev_addr);
+               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem)
+               return -ENXIO;
+       mem_size = resource_size(mem);
+       if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
+               return -EBUSY;
+       priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+       if (!priv->base)
+               return -EBUSY;
+
+       spin_lock_init(&priv->reg_lock);
+       priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
+       if (priv->indirect) {
+               priv->read     = w5100_read_indirect;
+               priv->write    = w5100_write_indirect;
+               priv->read16   = w5100_read16_indirect;
+               priv->write16  = w5100_write16_indirect;
+               priv->readbuf  = w5100_readbuf_indirect;
+               priv->writebuf = w5100_writebuf_indirect;
+       } else {
+               priv->read     = w5100_read_direct;
+               priv->write    = w5100_write_direct;
+               priv->read16   = w5100_read16_direct;
+               priv->write16  = w5100_write16_direct;
+               priv->readbuf  = w5100_readbuf_direct;
+               priv->writebuf = w5100_writebuf_direct;
+       }
+
+       w5100_hw_reset(priv);
+       if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
+               return -ENODEV;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+       ret = request_irq(irq, w5100_interrupt,
+                         IRQ_TYPE_LEVEL_LOW, name, ndev);
+       if (ret < 0)
+               return ret;
+       priv->irq = irq;
+
+       priv->link_gpio = data ? data->link_gpio : -EINVAL;
+       if (gpio_is_valid(priv->link_gpio)) {
+               char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
+               if (!link_name)
+                       return -ENOMEM;
+               snprintf(link_name, 16, "%s-link", name);
+               priv->link_irq = gpio_to_irq(priv->link_gpio);
+               if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                               link_name, priv->ndev) < 0)
+                       priv->link_gpio = -EINVAL;
+       }
+
+       netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
+       return 0;
+}
+
+static int __devinit w5100_probe(struct platform_device *pdev)
+{
+       struct w5100_priv *priv;
+       struct net_device *ndev;
+       int err;
+
+       ndev = alloc_etherdev(sizeof(*priv));
+       if (!ndev)
+               return -ENOMEM;
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       platform_set_drvdata(pdev, ndev);
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+
+       ether_setup(ndev);
+       ndev->netdev_ops = &w5100_netdev_ops;
+       ndev->ethtool_ops = &w5100_ethtool_ops;
+       ndev->watchdog_timeo = HZ;
+       netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
+
+       /* This chip doesn't support VLAN packets with normal MTU,
+        * so disable VLAN for this device.
+        */
+       ndev->features |= NETIF_F_VLAN_CHALLENGED;
+
+       err = register_netdev(ndev);
+       if (err < 0)
+               goto err_register;
+
+       err = w5100_hw_probe(pdev);
+       if (err < 0)
+               goto err_hw_probe;
+
+       return 0;
+
+err_hw_probe:
+       unregister_netdev(ndev);
+err_register:
+       free_netdev(ndev);
+       platform_set_drvdata(pdev, NULL);
+       return err;
+}
+
+static int __devexit w5100_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       w5100_hw_reset(priv);
+       free_irq(priv->irq, ndev);
+       if (gpio_is_valid(priv->link_gpio))
+               free_irq(priv->link_irq, ndev);
+
+       unregister_netdev(ndev);
+       free_netdev(ndev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int w5100_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               netif_carrier_off(ndev);
+               netif_device_detach(ndev);
+
+               w5100_hw_close(priv);
+       }
+       return 0;
+}
+
+static int w5100_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5100_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               w5100_hw_reset(priv);
+               w5100_hw_start(priv);
+
+               netif_device_attach(ndev);
+               if (!gpio_is_valid(priv->link_gpio) ||
+                   gpio_get_value(priv->link_gpio) != 0)
+                       netif_carrier_on(ndev);
+       }
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+
+static struct platform_driver w5100_driver = {
+       .driver         = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+               .pm     = &w5100_pm_ops,
+       },
+       .probe          = w5100_probe,
+       .remove         = __devexit_p(w5100_remove),
+};
+
+module_platform_driver(w5100_driver);
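For readers following the receive path in w5100_napi_poll() above: each frame in the socket-0 RX window is preceded by a 2-byte big-endian length that counts the header itself, so the payload is that value minus 2 and the read pointer is advanced by the full value before S0_CR_RECV is issued. The hypothetical helper below only restates that framing for clarity; it is not code from this commit and its output-buffer handling is deliberately simplified.

static int w5100_sketch_read_one_frame(struct w5100_priv *priv, u8 *out, int out_max)
{
        u8 hdr[2];
        u16 rd = w5100_read16(priv, W5100_S0_RX_RD);     /* current read pointer */
        u16 total, payload;

        w5100_readbuf(priv, rd, hdr, 2);                 /* 2-byte length header */
        total   = (hdr[0] << 8) | hdr[1];                /* big-endian, includes the header */
        payload = total - 2;

        if (payload > out_max)
                payload = out_max;                       /* simplified truncation */
        w5100_readbuf(priv, rd + 2, out, payload);

        w5100_write16(priv, W5100_S0_RX_RD, rd + total); /* consume header + payload */
        return w5100_command(priv, S0_CR_RECV);          /* hand the space back to the chip */
}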
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
new file mode 100644 (file)
index 0000000..f36addf
--- /dev/null
@@ -0,0 +1,720 @@
+/*
+ * Ethernet driver for the WIZnet W5300 chip.
+ *
+ * Copyright (C) 2008-2009 WIZnet Co.,Ltd.
+ * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com>
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kconfig.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wiznet.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#define DRV_NAME       "w5300"
+#define DRV_VERSION    "2012-04-04"
+
+MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION);
+MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
+MODULE_ALIAS("platform:"DRV_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * Registers
+ */
+#define W5300_MR               0x0000  /* Mode Register */
+#define   MR_DBW                 (1 << 15) /* Data bus width */
+#define   MR_MPF                 (1 << 14) /* Mac layer pause frame */
+#define   MR_WDF(n)              (((n)&7)<<11) /* Write data fetch time */
+#define   MR_RDH                 (1 << 10) /* Read data hold time */
+#define   MR_FS                          (1 << 8)  /* FIFO swap */
+#define   MR_RST                 (1 << 7)  /* S/W reset */
+#define   MR_PB                          (1 << 4)  /* Ping block */
+#define   MR_DBS                 (1 << 2)  /* Data bus swap */
+#define   MR_IND                 (1 << 0)  /* Indirect mode */
+#define W5300_IR               0x0002  /* Interrupt Register */
+#define W5300_IMR              0x0004  /* Interrupt Mask Register */
+#define   IR_S0                          0x0001  /* S0 interrupt */
+#define W5300_SHARL            0x0008  /* Source MAC address (0123) */
+#define W5300_SHARH            0x000c  /* Source MAC address (45) */
+#define W5300_TMSRL            0x0020  /* Transmit Memory Size (0123) */
+#define W5300_TMSRH            0x0024  /* Transmit Memory Size (4567) */
+#define W5300_RMSRL            0x0028  /* Receive Memory Size (0123) */
+#define W5300_RMSRH            0x002c  /* Receive Memory Size (4567) */
+#define W5300_MTYPE            0x0030  /* Memory Type */
+#define W5300_IDR              0x00fe  /* Chip ID register */
+#define   IDR_W5300              0x5300  /* =0x5300 for WIZnet W5300 */
+#define W5300_S0_MR            0x0200  /* S0 Mode Register */
+#define   S0_MR_CLOSED           0x0000  /* Close mode */
+#define   S0_MR_MACRAW           0x0004  /* MAC RAW mode (promiscuous) */
+#define   S0_MR_MACRAW_MF        0x0044  /* MAC RAW mode (filtered) */
+#define W5300_S0_CR            0x0202  /* S0 Command Register */
+#define   S0_CR_OPEN             0x0001  /* OPEN command */
+#define   S0_CR_CLOSE            0x0010  /* CLOSE command */
+#define   S0_CR_SEND             0x0020  /* SEND command */
+#define   S0_CR_RECV             0x0040  /* RECV command */
+#define W5300_S0_IMR           0x0204  /* S0 Interrupt Mask Register */
+#define W5300_S0_IR            0x0206  /* S0 Interrupt Register */
+#define   S0_IR_RECV             0x0004  /* Receive interrupt */
+#define   S0_IR_SENDOK           0x0010  /* Send OK interrupt */
+#define W5300_S0_SSR           0x0208  /* S0 Socket Status Register */
+#define W5300_S0_TX_WRSR       0x0220  /* S0 TX Write Size Register */
+#define W5300_S0_TX_FSR                0x0224  /* S0 TX Free Size Register */
+#define W5300_S0_RX_RSR                0x0228  /* S0 Received data Size */
+#define W5300_S0_TX_FIFO       0x022e  /* S0 Transmit FIFO */
+#define W5300_S0_RX_FIFO       0x0230  /* S0 Receive FIFO */
+#define W5300_REGS_LEN         0x0400
+
+/*
+ * Device driver private data structure
+ */
+struct w5300_priv {
+       void __iomem *base;
+       spinlock_t reg_lock;
+       bool indirect;
+       u16  (*read) (struct w5300_priv *priv, u16 addr);
+       void (*write)(struct w5300_priv *priv, u16 addr, u16 data);
+       int irq;
+       int link_irq;
+       int link_gpio;
+
+       struct napi_struct napi;
+       struct net_device *ndev;
+       bool promisc;
+       u32 msg_enable;
+};
+
+/************************************************************************
+ *
+ *  Lowlevel I/O functions
+ *
+ ***********************************************************************/
+
+/*
+ * In direct address mode the host system can directly access W5300 registers
+ * once they are mapped into Memory-Mapped I/O space.
+ *
+ * A 0x400-byte window is required in the memory space.
+ */
+static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr)
+{
+       return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static inline void w5300_write_direct(struct w5300_priv *priv,
+                                     u16 addr, u16 data)
+{
+       iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+/*
+ * In indirect address mode the host system accesses registers indirectly
+ * through the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
+ * Data Register (IDM_DR), which are directly mapped into Memory-Mapped I/O
+ * space.  The Mode Register (MR) remains directly accessible.
+ *
+ * Only a 0x06-byte window is required in the memory space.
+ */
+#define W5300_IDM_AR           0x0002   /* Indirect Mode Address */
+#define W5300_IDM_DR           0x0004   /* Indirect Mode Data */
+
+static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
+{
+       unsigned long flags;
+       u16 data;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5300_write_direct(priv, W5300_IDM_AR, addr);
+       mmiowb();
+       data = w5300_read_direct(priv, W5300_IDM_DR);
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+       return data;
+}
+
+static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       w5300_write_direct(priv, W5300_IDM_AR, addr);
+       mmiowb();
+       w5300_write_direct(priv, W5300_IDM_DR, data);
+       mmiowb();
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+#if defined(CONFIG_WIZNET_BUS_DIRECT)
+#define w5300_read     w5300_read_direct
+#define w5300_write    w5300_write_direct
+
+#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
+#define w5300_read     w5300_read_indirect
+#define w5300_write    w5300_write_indirect
+
+#else /* CONFIG_WIZNET_BUS_ANY */
+#define w5300_read     priv->read
+#define w5300_write    priv->write
+#endif
+
+static u32 w5300_read32(struct w5300_priv *priv, u16 addr)
+{
+       u32 data;
+       data  = w5300_read(priv, addr) << 16;
+       data |= w5300_read(priv, addr + 2);
+       return data;
+}
+
+static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data)
+{
+       w5300_write(priv, addr, data >> 16);
+       w5300_write(priv, addr + 2, data);
+}
+
+static int w5300_command(struct w5300_priv *priv, u16 cmd)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+       w5300_write(priv, W5300_S0_CR, cmd);
+       mmiowb();
+
+       while (w5300_read(priv, W5300_S0_CR) != 0) {
+               if (time_after(jiffies, timeout))
+                       return -EIO;
+               cpu_relax();
+       }
+
+       return 0;
+}
+
+static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len)
+{
+       u16 fifo;
+       int i;
+
+       for (i = 0; i < len; i += 2) {
+               fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+               *buf++ = fifo >> 8;
+               *buf++ = fifo;
+       }
+       fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+       fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+}
+
+static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len)
+{
+       u16 fifo;
+       int i;
+
+       for (i = 0; i < len; i += 2) {
+               fifo  = *buf++ << 8;
+               fifo |= *buf++;
+               w5300_write(priv, W5300_S0_TX_FIFO, fifo);
+       }
+       w5300_write32(priv, W5300_S0_TX_WRSR, len);
+}
+
+static void w5300_write_macaddr(struct w5300_priv *priv)
+{
+       struct net_device *ndev = priv->ndev;
+       w5300_write32(priv, W5300_SHARL,
+                     ndev->dev_addr[0] << 24 |
+                     ndev->dev_addr[1] << 16 |
+                     ndev->dev_addr[2] << 8 |
+                     ndev->dev_addr[3]);
+       w5300_write(priv, W5300_SHARH,
+                     ndev->dev_addr[4] << 8 |
+                     ndev->dev_addr[5]);
+       mmiowb();
+}
+
+static void w5300_hw_reset(struct w5300_priv *priv)
+{
+       w5300_write_direct(priv, W5300_MR, MR_RST);
+       mmiowb();
+       mdelay(5);
+       w5300_write_direct(priv, W5300_MR, priv->indirect ?
+                                MR_WDF(7) | MR_PB | MR_IND :
+                                MR_WDF(7) | MR_PB);
+       mmiowb();
+       w5300_write(priv, W5300_IMR, 0);
+       w5300_write_macaddr(priv);
+
+       /* Configure 128K of internal memory
+        * as 64K RX fifo and 64K TX fifo
+        */
+       w5300_write32(priv, W5300_RMSRL, 64 << 24);
+       w5300_write32(priv, W5300_RMSRH, 0);
+       w5300_write32(priv, W5300_TMSRL, 64 << 24);
+       w5300_write32(priv, W5300_TMSRH, 0);
+       w5300_write(priv, W5300_MTYPE, 0x00ff);
+       mmiowb();
+}
+
+static void w5300_hw_start(struct w5300_priv *priv)
+{
+       w5300_write(priv, W5300_S0_MR, priv->promisc ?
+                         S0_MR_MACRAW : S0_MR_MACRAW_MF);
+       mmiowb();
+       w5300_command(priv, S0_CR_OPEN);
+       w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
+       w5300_write(priv, W5300_IMR, IR_S0);
+       mmiowb();
+}
+
+static void w5300_hw_close(struct w5300_priv *priv)
+{
+       w5300_write(priv, W5300_IMR, 0);
+       mmiowb();
+       w5300_command(priv, S0_CR_CLOSE);
+}
+
+/***********************************************************************
+ *
+ *   Device driver functions / callbacks
+ *
+ ***********************************************************************/
+
+static void w5300_get_drvinfo(struct net_device *ndev,
+                             struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+               sizeof(info->bus_info));
+}
+
+static u32 w5300_get_link(struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       if (gpio_is_valid(priv->link_gpio))
+               return !!gpio_get_value(priv->link_gpio);
+
+       return 1;
+}
+
+static u32 w5300_get_msglevel(struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       return priv->msg_enable;
+}
+
+static void w5300_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       priv->msg_enable = value;
+}
+
+static int w5300_get_regs_len(struct net_device *ndev)
+{
+       return W5300_REGS_LEN;
+}
+
+static void w5300_get_regs(struct net_device *ndev,
+                          struct ethtool_regs *regs, void *_buf)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+       u8 *buf = _buf;
+       u16 addr;
+       u16 data;
+
+       regs->version = 1;
+       for (addr = 0; addr < W5300_REGS_LEN; addr += 2) {
+               switch (addr & 0x23f) {
+               case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */
+               case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */
+                       data = 0xffff;
+                       break;
+               default:
+                       data = w5300_read(priv, addr);
+                       break;
+               }
+               *buf++ = data >> 8;
+               *buf++ = data;
+       }
+}
+
+static void w5300_tx_timeout(struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       w5300_hw_reset(priv);
+       w5300_hw_start(priv);
+       ndev->stats.tx_errors++;
+       ndev->trans_start = jiffies;
+       netif_wake_queue(ndev);
+}
+
+static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+
+       w5300_write_frame(priv, skb->data, skb->len);
+       mmiowb();
+       ndev->stats.tx_packets++;
+       ndev->stats.tx_bytes += skb->len;
+       dev_kfree_skb(skb);
+       netif_dbg(priv, tx_queued, ndev, "tx queued\n");
+
+       w5300_command(priv, S0_CR_SEND);
+
+       return NETDEV_TX_OK;
+}
+
+static int w5300_napi_poll(struct napi_struct *napi, int budget)
+{
+       struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi);
+       struct net_device *ndev = priv->ndev;
+       struct sk_buff *skb;
+       int rx_count;
+       u16 rx_len;
+
+       for (rx_count = 0; rx_count < budget; rx_count++) {
+               u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR);
+               if (rx_fifo_len == 0)
+                       break;
+
+               rx_len = w5300_read(priv, W5300_S0_RX_FIFO);
+
+               skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
+               if (unlikely(!skb)) {
+                       u32 i;
+                       for (i = 0; i < rx_fifo_len; i += 2)
+                               w5300_read(priv, W5300_S0_RX_FIFO);
+                       ndev->stats.rx_dropped++;
+                       return -ENOMEM;
+               }
+
+               skb_put(skb, rx_len);
+               w5300_read_frame(priv, skb->data, rx_len);
+               skb->protocol = eth_type_trans(skb, ndev);
+
+               netif_receive_skb(skb);
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += rx_len;
+       }
+
+       if (rx_count < budget) {
+               w5300_write(priv, W5300_IMR, IR_S0);
+               mmiowb();
+               napi_complete(napi);
+       }
+
+       return rx_count;
+}
+
+static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
+{
+       struct net_device *ndev = ndev_instance;
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       int ir = w5300_read(priv, W5300_S0_IR);
+       if (!ir)
+               return IRQ_NONE;
+       w5300_write(priv, W5300_S0_IR, ir);
+       mmiowb();
+
+       if (ir & S0_IR_SENDOK) {
+               netif_dbg(priv, tx_done, ndev, "tx done\n");
+               netif_wake_queue(ndev);
+       }
+
+       if (ir & S0_IR_RECV) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       w5300_write(priv, W5300_IMR, 0);
+                       mmiowb();
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t w5300_detect_link(int irq, void *ndev_instance)
+{
+       struct net_device *ndev = ndev_instance;
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               if (gpio_get_value(priv->link_gpio) != 0) {
+                       netif_info(priv, link, ndev, "link is up\n");
+                       netif_carrier_on(ndev);
+               } else {
+                       netif_info(priv, link, ndev, "link is down\n");
+                       netif_carrier_off(ndev);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void w5300_set_rx_mode(struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+       bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
+
+       if (priv->promisc != set_promisc) {
+               priv->promisc = set_promisc;
+               w5300_hw_start(priv);
+       }
+}
+
+static int w5300_set_macaddr(struct net_device *ndev, void *addr)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+       struct sockaddr *sock_addr = addr;
+
+       if (!is_valid_ether_addr(sock_addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+       ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+       w5300_write_macaddr(priv);
+       return 0;
+}
+
+static int w5300_open(struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       netif_info(priv, ifup, ndev, "enabling\n");
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               return -EINVAL;
+       w5300_hw_start(priv);
+       napi_enable(&priv->napi);
+       netif_start_queue(ndev);
+       if (!gpio_is_valid(priv->link_gpio) ||
+           gpio_get_value(priv->link_gpio) != 0)
+               netif_carrier_on(ndev);
+       return 0;
+}
+
+static int w5300_stop(struct net_device *ndev)
+{
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       netif_info(priv, ifdown, ndev, "shutting down\n");
+       w5300_hw_close(priv);
+       netif_carrier_off(ndev);
+       netif_stop_queue(ndev);
+       napi_disable(&priv->napi);
+       return 0;
+}
+
+static const struct ethtool_ops w5300_ethtool_ops = {
+       .get_drvinfo            = w5300_get_drvinfo,
+       .get_msglevel           = w5300_get_msglevel,
+       .set_msglevel           = w5300_set_msglevel,
+       .get_link               = w5300_get_link,
+       .get_regs_len           = w5300_get_regs_len,
+       .get_regs               = w5300_get_regs,
+};
+
+static const struct net_device_ops w5300_netdev_ops = {
+       .ndo_open               = w5300_open,
+       .ndo_stop               = w5300_stop,
+       .ndo_start_xmit         = w5300_start_tx,
+       .ndo_tx_timeout         = w5300_tx_timeout,
+       .ndo_set_rx_mode        = w5300_set_rx_mode,
+       .ndo_set_mac_address    = w5300_set_macaddr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+};
+
+static int __devinit w5300_hw_probe(struct platform_device *pdev)
+{
+       struct wiznet_platform_data *data = pdev->dev.platform_data;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5300_priv *priv = netdev_priv(ndev);
+       const char *name = netdev_name(ndev);
+       struct resource *mem;
+       int mem_size;
+       int irq;
+       int ret;
+
+       if (data && is_valid_ether_addr(data->mac_addr)) {
+               memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+       } else {
+               random_ether_addr(ndev->dev_addr);
+               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem)
+               return -ENXIO;
+       mem_size = resource_size(mem);
+       if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
+               return -EBUSY;
+       priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+       if (!priv->base)
+               return -EBUSY;
+
+       spin_lock_init(&priv->reg_lock);
+       priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
+       if (priv->indirect) {
+               priv->read  = w5300_read_indirect;
+               priv->write = w5300_write_indirect;
+       } else {
+               priv->read  = w5300_read_direct;
+               priv->write = w5300_write_direct;
+       }
+
+       w5300_hw_reset(priv);
+       if (w5300_read(priv, W5300_IDR) != IDR_W5300)
+               return -ENODEV;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+       ret = request_irq(irq, w5300_interrupt,
+                         IRQ_TYPE_LEVEL_LOW, name, ndev);
+       if (ret < 0)
+               return ret;
+       priv->irq = irq;
+
+       priv->link_gpio = data ? data->link_gpio : -EINVAL;
+       if (gpio_is_valid(priv->link_gpio)) {
+               char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
+               if (!link_name)
+                       return -ENOMEM;
+               snprintf(link_name, 16, "%s-link", name);
+               priv->link_irq = gpio_to_irq(priv->link_gpio);
+               if (request_any_context_irq(priv->link_irq, w5300_detect_link,
+                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                               link_name, priv->ndev) < 0)
+                       priv->link_gpio = -EINVAL;
+       }
+
+       netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
+       return 0;
+}
+
+static int __devinit w5300_probe(struct platform_device *pdev)
+{
+       struct w5300_priv *priv;
+       struct net_device *ndev;
+       int err;
+
+       ndev = alloc_etherdev(sizeof(*priv));
+       if (!ndev)
+               return -ENOMEM;
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       platform_set_drvdata(pdev, ndev);
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+
+       ether_setup(ndev);
+       ndev->netdev_ops = &w5300_netdev_ops;
+       ndev->ethtool_ops = &w5300_ethtool_ops;
+       ndev->watchdog_timeo = HZ;
+       netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);
+
+       /* This chip doesn't support VLAN packets with normal MTU,
+        * so disable VLAN for this device.
+        */
+       ndev->features |= NETIF_F_VLAN_CHALLENGED;
+
+       err = register_netdev(ndev);
+       if (err < 0)
+               goto err_register;
+
+       err = w5300_hw_probe(pdev);
+       if (err < 0)
+               goto err_hw_probe;
+
+       return 0;
+
+err_hw_probe:
+       unregister_netdev(ndev);
+err_register:
+       free_netdev(ndev);
+       platform_set_drvdata(pdev, NULL);
+       return err;
+}
+
+static int __devexit w5300_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       w5300_hw_reset(priv);
+       free_irq(priv->irq, ndev);
+       if (gpio_is_valid(priv->link_gpio))
+               free_irq(priv->link_irq, ndev);
+
+       unregister_netdev(ndev);
+       free_netdev(ndev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int w5300_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               netif_carrier_off(ndev);
+               netif_device_detach(ndev);
+
+               w5300_hw_close(priv);
+       }
+       return 0;
+}
+
+static int w5300_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct w5300_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               w5300_hw_reset(priv);
+               w5300_hw_start(priv);
+
+               netif_device_attach(ndev);
+               if (!gpio_is_valid(priv->link_gpio) ||
+                   gpio_get_value(priv->link_gpio) != 0)
+                       netif_carrier_on(ndev);
+       }
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
+
+static struct platform_driver w5300_driver = {
+       .driver         = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+               .pm     = &w5300_pm_ops,
+       },
+       .probe          = w5300_probe,
+       .remove         = __devexit_p(w5300_remove),
+};
+
+module_platform_driver(w5300_driver);
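A note on the W5300 data path above, offered as commentary rather than as part of the commit: the chip exposes its socket buffers only through 16-bit FIFO registers, so w5300_read_frame() and w5300_write_frame() move two bytes per access in big-endian order. That is why w5300_napi_poll() allocates roundup(rx_len, 2) bytes but exposes only rx_len with skb_put(): a 61-byte frame, for example, is transferred as 31 FIFO words, and the last word spills one padding byte into the extra space. A minimal, illustrative packing helper under the same convention:

/* Illustrative only: pack bytes into the big-endian 16-bit words that
 * w5300_write_frame() pushes into W5300_S0_TX_FIFO.  Unlike the driver,
 * this sketch zero-pads an odd trailing byte instead of relying on the
 * caller to provide a rounded-up buffer. */
static void w5300_sketch_pack_be16(const u8 *buf, int len, u16 *words)
{
        int i;

        for (i = 0; i < len; i += 2) {
                u16 hi = buf[i];
                u16 lo = (i + 1 < len) ? buf[i + 1] : 0;

                words[i / 2] = (hi << 8) | lo;
        }
}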
index d21591a2c593df51f7011623bc42405e2e65eadd..1eaf7128afeefa66cae6d1f96c9b2d496fcea023 100644 (file)
@@ -1000,6 +1000,7 @@ static const struct ethtool_ops temac_ethtool_ops = {
        .set_settings = temac_set_settings,
        .nway_reset = temac_nway_reset,
        .get_link = ethtool_op_get_link,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 static int __devinit temac_of_probe(struct platform_device *op)
index cf67352cea14cfe2392de7b7cfa3139537a5c0a2..3f431019e615fd80ebcfa9954a043b8b3513e6d1 100644 (file)
@@ -5,8 +5,8 @@
 config NET_VENDOR_XSCALE
        bool "Intel XScale IXP devices"
        default y
-       depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \
-                  IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611)
+       depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
+                  IXP4XX_NPE && IXP4XX_QMGR)
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -27,6 +27,4 @@ config IXP4XX_ETH
          Say Y here if you want to use built-in Ethernet ports
          on IXP4xx processor.
 
-source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
-
 endif # NET_VENDOR_XSCALE
index b195b9d7fe8133b4c52c8e63fed200fae2213400..abc3b031fba718783ed6eb347d36cfa3f01fcc76 100644 (file)
@@ -2,5 +2,4 @@
 # Makefile for the Intel XScale IXP device drivers.
 #
 
-obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
 obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
deleted file mode 100644 (file)
index 58dbc5b..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-config ENP2611_MSF_NET
-       tristate "Radisys ENP2611 MSF network interface support"
-       depends on ARCH_ENP2611
-       ---help---
-         This is a driver for the MSF network interface unit in
-         the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
deleted file mode 100644 (file)
index fd38351..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
-
-enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
deleted file mode 100644 (file)
index 7dea5b9..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include "caleb.h"
-
-#define CALEB_IDLO             0x00
-#define CALEB_IDHI             0x01
-#define CALEB_RID              0x02
-#define CALEB_RESET            0x03
-#define CALEB_INTREN0          0x04
-#define CALEB_INTREN1          0x05
-#define CALEB_INTRSTAT0                0x06
-#define CALEB_INTRSTAT1                0x07
-#define CALEB_PORTEN           0x08
-#define CALEB_BURST            0x09
-#define CALEB_PORTPAUS         0x0A
-#define CALEB_PORTPAUSD                0x0B
-#define CALEB_PHY0RX           0x10
-#define CALEB_PHY1RX           0x11
-#define CALEB_PHY0TX           0x12
-#define CALEB_PHY1TX           0x13
-#define CALEB_IXPRX_HI_CNTR    0x15
-#define CALEB_PHY0RX_HI_CNTR   0x16
-#define CALEB_PHY1RX_HI_CNTR   0x17
-#define CALEB_IXPRX_CNTR       0x18
-#define CALEB_PHY0RX_CNTR      0x19
-#define CALEB_PHY1RX_CNTR      0x1A
-#define CALEB_IXPTX_CNTR       0x1B
-#define CALEB_PHY0TX_CNTR      0x1C
-#define CALEB_PHY1TX_CNTR      0x1D
-#define CALEB_DEBUG0           0x1E
-#define CALEB_DEBUG1           0x1F
-
-
-static u8 caleb_reg_read(int reg)
-{
-       u8 value;
-
-       value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
-
-//     printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
-
-       return value;
-}
-
-static void caleb_reg_write(int reg, u8 value)
-{
-       u8 dummy;
-
-//     printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
-
-       *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
-
-       dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
-       __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
-}
-
-
-void caleb_reset(void)
-{
-       /*
-        * Perform a chip reset.
-        */
-       caleb_reg_write(CALEB_RESET, 0x02);
-       udelay(1);
-
-       /*
-        * Enable all interrupt sources.  This is needed to get
-        * meaningful results out of the status bits (register 6
-        * and 7.)
-        */
-       caleb_reg_write(CALEB_INTREN0, 0xff);
-       caleb_reg_write(CALEB_INTREN1, 0x07);
-
-       /*
-        * Set RX and TX FIFO thresholds to 1.5kb.
-        */
-       caleb_reg_write(CALEB_PHY0RX, 0x11);
-       caleb_reg_write(CALEB_PHY1RX, 0x11);
-       caleb_reg_write(CALEB_PHY0TX, 0x11);
-       caleb_reg_write(CALEB_PHY1TX, 0x11);
-
-       /*
-        * Program SPI-3 burst size.
-        */
-       caleb_reg_write(CALEB_BURST, 0);        // 64-byte RBUF mpackets
-//     caleb_reg_write(CALEB_BURST, 1);        // 128-byte RBUF mpackets
-//     caleb_reg_write(CALEB_BURST, 2);        // 256-byte RBUF mpackets
-}
-
-void caleb_enable_rx(int port)
-{
-       u8 temp;
-
-       temp = caleb_reg_read(CALEB_PORTEN);
-       temp |= 1 << port;
-       caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_disable_rx(int port)
-{
-       u8 temp;
-
-       temp = caleb_reg_read(CALEB_PORTEN);
-       temp &= ~(1 << port);
-       caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_enable_tx(int port)
-{
-       u8 temp;
-
-       temp = caleb_reg_read(CALEB_PORTEN);
-       temp |= 1 << (port + 4);
-       caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_disable_tx(int port)
-{
-       u8 temp;
-
-       temp = caleb_reg_read(CALEB_PORTEN);
-       temp &= ~(1 << (port + 4));
-       caleb_reg_write(CALEB_PORTEN, temp);
-}
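Two details of the removed caleb.c are worth spelling out: caleb_reg_write() follows the posted write with a dummy read-back, and the one-instruction inline asm makes the compiler treat that read-back value as used, so the write is pushed out to the FPGA before the caller proceeds. The enable/disable helpers then imply a simple PORTEN layout, sketched here with hypothetical macro names purely for illustration:

        /* PORTEN bit layout implied by the helpers above: one RX bit and one
         * TX bit per SPI-3 port. */
        #define CALEB_PORTEN_RX(port)   (1 << (port))           /* bits 0-3 */
        #define CALEB_PORTEN_TX(port)   (1 << ((port) + 4))     /* bits 4-7 */

        /* Enabling both directions of a port is one read-modify-write: */
        static void caleb_enable_port(int port)
        {
                u8 temp = caleb_reg_read(CALEB_PORTEN);

                temp |= CALEB_PORTEN_RX(port) | CALEB_PORTEN_TX(port);
                caleb_reg_write(CALEB_PORTEN, temp);
        }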
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
deleted file mode 100644 (file)
index e93a1ef..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __CALEB_H
-#define __CALEB_H
-
-void caleb_reset(void);
-void caleb_enable_rx(int port);
-void caleb_disable_rx(int port);
-void caleb_enable_tx(int port);
-void caleb_disable_tx(int port);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
deleted file mode 100644 (file)
index 34a6cfd..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * IXP2400 MSF network device driver for the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <asm/hardware/uengine.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-#include "ixpdev.h"
-#include "caleb.h"
-#include "ixp2400-msf.h"
-#include "pm3386.h"
-
-/***********************************************************************
- * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
- * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
- * to the IXP2400.
- *
- *                +-------------+
- * SFP GBIC #0 ---+             |       +---------+
- *                |  PM3386 #0  +-------+         |
- * SFP GBIC #1 ---+             |       | "Caleb" |         +---------+
- *                +-------------+       |         |         |         |
- *                                      | SPI-3   +---------+ IXP2400 |
- *                +-------------+       | bridge  |         |         |
- * SFP GBIC #2 ---+             |       | FPGA    |         +---------+
- *                |  PM3386 #1  +-------+         |
- *                |             |       +---------+
- *                +-------------+
- *              ^                   ^                  ^
- *              | 1.25Gbaud         | 104MHz           | 104MHz
- *              | SERDES ea.        | SPI-3 ea.        | SPI-3
- *
- ***********************************************************************/
-static struct ixp2400_msf_parameters enp2611_msf_parameters =
-{
-       .rx_mode =              IXP2400_RX_MODE_UTOPIA_POS |
-                               IXP2400_RX_MODE_1x32 |
-                               IXP2400_RX_MODE_MPHY |
-                               IXP2400_RX_MODE_MPHY_32 |
-                               IXP2400_RX_MODE_MPHY_POLLED_STATUS |
-                               IXP2400_RX_MODE_MPHY_LEVEL3 |
-                               IXP2400_RX_MODE_RBUF_SIZE_64,
-
-       .rxclk01_multiplier =   IXP2400_PLL_MULTIPLIER_16,
-
-       .rx_poll_ports =        3,
-
-       .rx_channel_mode = {
-               IXP2400_PORT_RX_MODE_MASTER |
-               IXP2400_PORT_RX_MODE_POS_PHY |
-               IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-               IXP2400_PORT_RX_MODE_ODD_PARITY |
-               IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-               IXP2400_PORT_RX_MODE_MASTER |
-               IXP2400_PORT_RX_MODE_POS_PHY |
-               IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-               IXP2400_PORT_RX_MODE_ODD_PARITY |
-               IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-               IXP2400_PORT_RX_MODE_MASTER |
-               IXP2400_PORT_RX_MODE_POS_PHY |
-               IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-               IXP2400_PORT_RX_MODE_ODD_PARITY |
-               IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-               IXP2400_PORT_RX_MODE_MASTER |
-               IXP2400_PORT_RX_MODE_POS_PHY |
-               IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-               IXP2400_PORT_RX_MODE_ODD_PARITY |
-               IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
-       },
-
-       .tx_mode =              IXP2400_TX_MODE_UTOPIA_POS |
-                               IXP2400_TX_MODE_1x32 |
-                               IXP2400_TX_MODE_MPHY |
-                               IXP2400_TX_MODE_MPHY_32 |
-                               IXP2400_TX_MODE_MPHY_POLLED_STATUS |
-                               IXP2400_TX_MODE_MPHY_LEVEL3 |
-                               IXP2400_TX_MODE_TBUF_SIZE_64,
-
-       .txclk01_multiplier =   IXP2400_PLL_MULTIPLIER_16,
-
-       .tx_poll_ports =        3,
-
-       .tx_channel_mode = {
-               IXP2400_PORT_TX_MODE_MASTER |
-               IXP2400_PORT_TX_MODE_POS_PHY |
-               IXP2400_PORT_TX_MODE_ODD_PARITY |
-               IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-               IXP2400_PORT_TX_MODE_MASTER |
-               IXP2400_PORT_TX_MODE_POS_PHY |
-               IXP2400_PORT_TX_MODE_ODD_PARITY |
-               IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-               IXP2400_PORT_TX_MODE_MASTER |
-               IXP2400_PORT_TX_MODE_POS_PHY |
-               IXP2400_PORT_TX_MODE_ODD_PARITY |
-               IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-               IXP2400_PORT_TX_MODE_MASTER |
-               IXP2400_PORT_TX_MODE_POS_PHY |
-               IXP2400_PORT_TX_MODE_ODD_PARITY |
-               IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
-       }
-};
-
-static struct net_device *nds[3];
-static struct timer_list link_check_timer;
-
-/* @@@ Poll the SFP moddef0 line too.  */
-/* @@@ Try to use the pm3386 DOOL interrupt as well.  */
-static void enp2611_check_link_status(unsigned long __dummy)
-{
-       int i;
-
-       for (i = 0; i < 3; i++) {
-               struct net_device *dev;
-               int status;
-
-               dev = nds[i];
-               if (dev == NULL)
-                       continue;
-
-               status = pm3386_is_link_up(i);
-               if (status && !netif_carrier_ok(dev)) {
-                       /* @@@ Should report autonegotiation status.  */
-                       printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
-
-                       pm3386_enable_tx(i);
-                       caleb_enable_tx(i);
-                       netif_carrier_on(dev);
-               } else if (!status && netif_carrier_ok(dev)) {
-                       printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
-
-                       netif_carrier_off(dev);
-                       caleb_disable_tx(i);
-                       pm3386_disable_tx(i);
-               }
-       }
-
-       link_check_timer.expires = jiffies + HZ / 10;
-       add_timer(&link_check_timer);
-}
-
-static void enp2611_set_port_admin_status(int port, int up)
-{
-       if (up) {
-               caleb_enable_rx(port);
-
-               pm3386_set_carrier(port, 1);
-               pm3386_enable_rx(port);
-       } else {
-               caleb_disable_tx(port);
-               pm3386_disable_tx(port);
-               /* @@@ Flush out pending packets.  */
-               pm3386_set_carrier(port, 0);
-
-               pm3386_disable_rx(port);
-               caleb_disable_rx(port);
-       }
-}
-
-static int __init enp2611_init_module(void)
-{ 
-       int ports;
-       int i;
-
-       if (!machine_is_enp2611())
-               return -ENODEV;
-
-       caleb_reset();
-       pm3386_reset();
-
-       ports = pm3386_port_count();
-       for (i = 0; i < ports; i++) {
-               nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
-               if (nds[i] == NULL) {
-                       while (--i >= 0)
-                               free_netdev(nds[i]);
-                       return -ENOMEM;
-               }
-
-               pm3386_init_port(i);
-               pm3386_get_mac(i, nds[i]->dev_addr);
-       }
-
-       ixp2400_msf_init(&enp2611_msf_parameters);
-
-       if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
-               for (i = 0; i < ports; i++)
-                       if (nds[i])
-                               free_netdev(nds[i]);
-               return -EINVAL;
-       }
-
-       init_timer(&link_check_timer);
-       link_check_timer.function = enp2611_check_link_status;
-       link_check_timer.expires = jiffies;
-       add_timer(&link_check_timer);
-
-       return 0;
-}
-
-static void __exit enp2611_cleanup_module(void)
-{
-       int i;
-
-       del_timer_sync(&link_check_timer);
-
-       ixpdev_deinit();
-       for (i = 0; i < 3; i++)
-               free_netdev(nds[i]);
-}
-
-module_init(enp2611_init_module);
-module_exit(enp2611_cleanup_module);
-MODULE_LICENSE("GPL");
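enp2611_check_link_status() polls the PM3386s every HZ/10 jiffies (about 100 ms) by re-setting .expires and calling add_timer() from inside the handler. The same periodic poll is more commonly written with mod_timer(); a sketch of the equivalent re-arm, assuming the same timer and handler names:

        static void enp2611_check_link_status(unsigned long __dummy)
        {
                /* ... per-port carrier / pm3386 / caleb handling as above ... */

                /* re-arm the poll; equivalent to setting .expires + add_timer() */
                mod_timer(&link_check_timer, jiffies + HZ / 10);
        }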
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
deleted file mode 100644 (file)
index f5ffd7e..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Generic library functions for the MSF (Media and Switch Fabric) unit
- * found on the Intel IXP2400 network processor.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/hardware.h>
-#include <mach/ixp2000-regs.h>
-#include <asm/delay.h>
-#include <asm/io.h>
-#include "ixp2400-msf.h"
-
-/*
- * This is the Intel recommended PLL init procedure as described on
- * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
- */
-static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
-{
-       int rx_dual_clock;
-       int tx_dual_clock;
-       u32 value;
-
-       /*
-        * If the RX mode is not 1x32, we have to enable both RX PLLs
-        * (#0 and #1.)  The same thing for the TX direction.
-        */
-       rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
-       tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
-
-       /*
-        * Read initial value.
-        */
-       value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
-
-       /*
-        * Put PLLs in powerdown and bypass mode.
-        */
-       value |= 0x0000f0f0;
-       ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-       /*
-        * Set single or dual clock mode bits.
-        */
-       value &= ~0x03000000;
-       value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
-
-       /*
-        * Set multipliers.
-        */
-       value &= ~0x00ff0000;
-       value |= mp->rxclk01_multiplier << 16;
-       value |= mp->rxclk23_multiplier << 18;
-       value |= mp->txclk01_multiplier << 20;
-       value |= mp->txclk23_multiplier << 22;
-
-       /*
-        * And write value.
-        */
-       ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-       /*
-        * Disable PLL bypass mode.
-        */
-       value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
-       ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-       /*
-        * Turn on PLLs.
-        */
-       value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
-       ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-       /*
-        * Wait for PLLs to lock.  There are lock status bits, but IXP2400
-        * erratum #65 says that these lock bits should not be relied upon
-        * as they might not accurately reflect the true state of the PLLs.
-        */
-       udelay(100);
-}
-
-/*
- * Needed according to p480 of Programmer's Reference Manual.
- */
-static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
-{
-       int size_bits;
-       int i;
-
-       /*
-        * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
-        * corruption) in the Intel-recommended way: do not add the RBUF
-        * elements susceptible to corruption to the freelist.
-        */
-       size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
-       if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
-               for (i = 1; i < 128; i++) {
-                       if (i == 9 || i == 18 || i == 27)
-                               continue;
-                       ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-               }
-       } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
-               for (i = 1; i < 64; i++) {
-                       if (i == 4 || i == 9 || i == 13)
-                               continue;
-                       ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-               }
-       } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
-               for (i = 1; i < 32; i++) {
-                       if (i == 2 || i == 4 || i == 6)
-                               continue;
-                       ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-               }
-       }
-}
-
-static u32 ixp2400_msf_valid_channels(u32 reg)
-{
-       u32 channels;
-
-       channels = 0;
-       switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
-       case IXP2400_RX_MODE_1x32:
-               channels = 0x1;
-               if (reg & IXP2400_RX_MODE_MPHY &&
-                   !(reg & IXP2400_RX_MODE_MPHY_32))
-                       channels = 0xf;
-               break;
-
-       case IXP2400_RX_MODE_2x16:
-               channels = 0x5;
-               break;
-
-       case IXP2400_RX_MODE_4x8:
-               channels = 0xf;
-               break;
-
-       case IXP2400_RX_MODE_1x16_2x8:
-               channels = 0xd;
-               break;
-       }
-
-       return channels;
-}
-
-static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
-{
-       u32 value;
-
-       value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
-       value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
-       ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
-}
-
-static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
-{
-       u32 value;
-
-       value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
-       value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
-       ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
-}
-
-
-void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
-{
-       u32 value;
-       int i;
-
-       /*
-        * Init the RX/TX PLLs based on the passed parameter block.
-        */
-       ixp2400_pll_init(mp);
-
-       /*
-        * Reset MSF.  Bit 7 in IXP_RESET_0 resets the MSF.
-        */
-       value = ixp2000_reg_read(IXP2000_RESET0);
-       ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
-       ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
-
-       /*
-        * Initialise the RX section.
-        */
-       ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
-       ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
-       for (i = 0; i < 4; i++) {
-               ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
-                                               mp->rx_channel_mode[i]);
-       }
-       ixp2400_msf_free_rbuf_entries(mp);
-       ixp2400_msf_enable_rx(mp);
-
-       /*
-        * Initialise the TX section.
-        */
-       ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
-       ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
-       for (i = 0; i < 4; i++) {
-               ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
-                                               mp->tx_channel_mode[i]);
-       }
-       ixp2400_msf_enable_tx(mp);
-}
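In ixp2400_pll_init(), the four 2-bit clock multipliers all land in bits 16-23 of IXP2000_MSF_CLK_CNTRL. Restated as a small helper (the function name is hypothetical; only the shifts come from the code above), the ENP2611 parameters - rxclk01 and txclk01 set to IXP2400_PLL_MULTIPLIER_16 (0x2), the clk23 fields left at zero - pack to 0x00220000:

        static u32 msf_clk_cntrl_multipliers(const struct ixp2400_msf_parameters *mp)
        {
                return (mp->rxclk01_multiplier << 16) |
                       (mp->rxclk23_multiplier << 18) |
                       (mp->txclk01_multiplier << 20) |
                       (mp->txclk23_multiplier << 22);
        }
        /* ENP2611: (0x2 << 16) | (0 << 18) | (0x2 << 20) | (0 << 22) == 0x00220000 */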
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
deleted file mode 100644 (file)
index 3ac1af2..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Generic library functions for the MSF (Media and Switch Fabric) unit
- * found on the Intel IXP2400 network processor.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#ifndef __IXP2400_MSF_H
-#define __IXP2400_MSF_H
-
-struct ixp2400_msf_parameters
-{
-       u32                             rx_mode;
-       unsigned                        rxclk01_multiplier:2;
-       unsigned                        rxclk23_multiplier:2;
-       unsigned                        rx_poll_ports:6;
-       u32                             rx_channel_mode[4];
-
-       u32                             tx_mode;
-       unsigned                        txclk01_multiplier:2;
-       unsigned                        txclk23_multiplier:2;
-       unsigned                        tx_poll_ports:6;
-       u32                             tx_channel_mode[4];
-};
-
-void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
-
-#define IXP2400_PLL_MULTIPLIER_48              0x00
-#define IXP2400_PLL_MULTIPLIER_24              0x01
-#define IXP2400_PLL_MULTIPLIER_16              0x02
-#define IXP2400_PLL_MULTIPLIER_12              0x03
-
-#define IXP2400_RX_MODE_CSIX                   0x00400000
-#define IXP2400_RX_MODE_UTOPIA_POS             0x00000000
-#define IXP2400_RX_MODE_WIDTH_MASK             0x00300000
-#define IXP2400_RX_MODE_1x16_2x8               0x00300000
-#define IXP2400_RX_MODE_4x8                    0x00200000
-#define IXP2400_RX_MODE_2x16                   0x00100000
-#define IXP2400_RX_MODE_1x32                   0x00000000
-#define IXP2400_RX_MODE_MPHY                   0x00080000
-#define IXP2400_RX_MODE_SPHY                   0x00000000
-#define IXP2400_RX_MODE_MPHY_32                        0x00040000
-#define IXP2400_RX_MODE_MPHY_4                 0x00000000
-#define IXP2400_RX_MODE_MPHY_POLLED_STATUS     0x00020000
-#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS     0x00000000
-#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX       0x00010000
-#define IXP2400_RX_MODE_CBUS_SIMPLEX           0x00000000
-#define IXP2400_RX_MODE_MPHY_LEVEL2            0x00004000
-#define IXP2400_RX_MODE_MPHY_LEVEL3            0x00000000
-#define IXP2400_RX_MODE_CBUS_8BIT              0x00002000
-#define IXP2400_RX_MODE_CBUS_4BIT              0x00000000
-#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST   0x00000200
-#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS   0x00000000
-#define IXP2400_RX_MODE_RBUF_SIZE_MASK         0x0000000c
-#define IXP2400_RX_MODE_RBUF_SIZE_256          0x00000008
-#define IXP2400_RX_MODE_RBUF_SIZE_128          0x00000004
-#define IXP2400_RX_MODE_RBUF_SIZE_64           0x00000000
-
-#define IXP2400_PORT_RX_MODE_SLAVE             0x00000040
-#define IXP2400_PORT_RX_MODE_MASTER            0x00000000
-#define IXP2400_PORT_RX_MODE_POS_PHY_L3                0x00000020
-#define IXP2400_PORT_RX_MODE_POS_PHY_L2                0x00000000
-#define IXP2400_PORT_RX_MODE_POS_PHY           0x00000010
-#define IXP2400_PORT_RX_MODE_UTOPIA            0x00000000
-#define IXP2400_PORT_RX_MODE_EVEN_PARITY       0x0000000c
-#define IXP2400_PORT_RX_MODE_ODD_PARITY                0x00000008
-#define IXP2400_PORT_RX_MODE_NO_PARITY         0x00000000
-#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS  0x00000002
-#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS       0x00000000
-#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE    0x00000001
-#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE    0x00000000
-
-#define IXP2400_TX_MODE_CSIX                   0x00400000
-#define IXP2400_TX_MODE_UTOPIA_POS             0x00000000
-#define IXP2400_TX_MODE_WIDTH_MASK             0x00300000
-#define IXP2400_TX_MODE_1x16_2x8               0x00300000
-#define IXP2400_TX_MODE_4x8                    0x00200000
-#define IXP2400_TX_MODE_2x16                   0x00100000
-#define IXP2400_TX_MODE_1x32                   0x00000000
-#define IXP2400_TX_MODE_MPHY                   0x00080000
-#define IXP2400_TX_MODE_SPHY                   0x00000000
-#define IXP2400_TX_MODE_MPHY_32                        0x00040000
-#define IXP2400_TX_MODE_MPHY_4                 0x00000000
-#define IXP2400_TX_MODE_MPHY_POLLED_STATUS     0x00020000
-#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS     0x00000000
-#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX       0x00010000
-#define IXP2400_TX_MODE_CBUS_SIMPLEX           0x00000000
-#define IXP2400_TX_MODE_MPHY_LEVEL2            0x00004000
-#define IXP2400_TX_MODE_MPHY_LEVEL3            0x00000000
-#define IXP2400_TX_MODE_CBUS_8BIT              0x00002000
-#define IXP2400_TX_MODE_CBUS_4BIT              0x00000000
-#define IXP2400_TX_MODE_TBUF_SIZE_MASK         0x0000000c
-#define IXP2400_TX_MODE_TBUF_SIZE_256          0x00000008
-#define IXP2400_TX_MODE_TBUF_SIZE_128          0x00000004
-#define IXP2400_TX_MODE_TBUF_SIZE_64           0x00000000
-
-#define IXP2400_PORT_TX_MODE_SLAVE             0x00000040
-#define IXP2400_PORT_TX_MODE_MASTER            0x00000000
-#define IXP2400_PORT_TX_MODE_POS_PHY           0x00000010
-#define IXP2400_PORT_TX_MODE_UTOPIA            0x00000000
-#define IXP2400_PORT_TX_MODE_EVEN_PARITY       0x0000000c
-#define IXP2400_PORT_TX_MODE_ODD_PARITY                0x00000008
-#define IXP2400_PORT_TX_MODE_NO_PARITY         0x00000000
-#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS  0x00000002
-#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE    0x00000001
-#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE    0x00000000
-
-
-#endif
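As a concrete reading of these definitions, the ENP2611 rx_mode built earlier reduces to just the three MPHY bits, since every other selected option encodes as zero; the MSF then treats the interface as a single 32-bit MPHY channel with polled status:

        u32 rx_mode = IXP2400_RX_MODE_UTOPIA_POS |              /* 0x00000000 */
                      IXP2400_RX_MODE_1x32 |                    /* 0x00000000 */
                      IXP2400_RX_MODE_MPHY |                    /* 0x00080000 */
                      IXP2400_RX_MODE_MPHY_32 |                 /* 0x00040000 */
                      IXP2400_RX_MODE_MPHY_POLLED_STATUS |      /* 0x00020000 */
                      IXP2400_RX_MODE_MPHY_LEVEL3 |             /* 0x00000000 */
                      IXP2400_RX_MODE_RBUF_SIZE_64;             /* 0x00000000 */
        /* rx_mode == 0x000e0000; ixp2400_msf_valid_channels() returns 0x1 for
         * this value because the width is 1x32 and MPHY_32 is set. */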
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
deleted file mode 100644 (file)
index 42a73e3..0000000
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * RX ucode for the Intel IXP2400 in POS-PHY mode.
- * Copyright (C) 2004, 2005 Lennert Buytenhek
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Assumptions made in this code:
- * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
- *   only one full element list is used.  This includes, for example,
- *   1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4.  (This
- *   is not an exhaustive list.)
- * - The RBUF uses 64-byte mpackets.
- * - RX descriptors reside in SRAM, and have the following format:
- *     struct rx_desc
- *     {
- *     // to uengine
- *             u32     buf_phys_addr;
- *             u32     buf_length;
- *
- *     // from uengine
- *             u32     channel;
- *             u32     pkt_length;
- *     };
- * - Packet data resides in DRAM.
- * - Packet buffer addresses are 8-byte aligned.
- * - Scratch ring 0 is rx_pending.
- * - Scratch ring 1 is rx_done, and has status condition 'full'.
- * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
- * - This code is run on all eight threads of the microengine it runs on.
- *
- * Local memory is used for per-channel RX state.
- */
-
-#define RX_THREAD_FREELIST_0           0x0030
-#define RBUF_ELEMENT_DONE              0x0044
-
-#define CHANNEL_FLAGS                  *l$index0[0]
-#define CHANNEL_FLAG_RECEIVING         1
-#define PACKET_LENGTH                  *l$index0[1]
-#define PACKET_CHECKSUM                        *l$index0[2]
-#define BUFFER_HANDLE                  *l$index0[3]
-#define BUFFER_START                   *l$index0[4]
-#define BUFFER_LENGTH                  *l$index0[5]
-
-#define CHANNEL_STATE_SIZE             24      // in bytes
-#define CHANNEL_STATE_SHIFT            5       // ceil(log2(state size))
-
-
-       .sig volatile sig1
-       .sig volatile sig2
-       .sig volatile sig3
-
-       .sig mpacket_arrived
-       .reg add_to_rx_freelist
-       .reg read $rsw0, $rsw1
-       .xfer_order $rsw0 $rsw1
-
-       .reg zero
-
-       /*
-        * Initialise add_to_rx_freelist.
-        */
-       .begin
-               .reg temp
-               .reg temp2
-
-               immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
-               immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
-
-               local_csr_rd[ACTIVE_CTX_STS]
-               immed[temp, 0]
-               alu[temp2, temp, and, 0x1f]
-               alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
-               alu[temp2, temp, and, 0x80]
-               alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
-       .end
-
-       immed[zero, 0]
-
-       /*
-        * Skip context 0 initialisation?
-        */
-       .begin
-               br!=ctx[0, mpacket_receive_loop#]
-       .end
-
-       /*
-        * Initialise local memory.
-        */
-       .begin
-               .reg addr
-               .reg temp
-
-               immed[temp, 0]
-       init_local_mem_loop#:
-               alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
-               local_csr_wr[ACTIVE_LM_ADDR_0, addr]
-               nop
-               nop
-               nop
-
-               immed[CHANNEL_FLAGS, 0]
-
-               alu[temp, temp, +, 1]
-               alu[--, temp, and, 0x20]
-               beq[init_local_mem_loop#]
-       .end
-
-       /*
-        * Initialise signal pipeline.
-        */
-       .begin
-               local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
-               .set_sig sig1
-
-               local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
-               .set_sig sig2
-
-               local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
-               .set_sig sig3
-       .end
-
-mpacket_receive_loop#:
-       /*
-        * Synchronise and wait for mpacket.
-        */
-       .begin
-               ctx_arb[sig1]
-               local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
-
-               msf[fast_wr, --, add_to_rx_freelist, 0]
-               .set_sig mpacket_arrived
-               ctx_arb[mpacket_arrived]
-               .set $rsw0 $rsw1
-       .end
-
-       /*
-        * We halt if we see {inbparerr,parerr,null,soperror}.
-        */
-       .begin
-               alu_shf[--, 0x1b, and, $rsw0, >>8]
-               bne[abort_rswerr#]
-       .end
-
-       /*
-        * Point local memory pointer to this channel's state area.
-        */
-       .begin
-               .reg chanaddr
-
-               alu[chanaddr, $rsw0, and, 0x1f]
-               alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
-               local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
-               nop
-               nop
-               nop
-       .end
-
-       /*
-        * Check whether we received a SOP mpacket while we were already
-        * working on a packet, or a non-SOP mpacket while there was no
-        * packet pending.  (SOP == RECEIVING -> abort)  If everything's
-        * okay, update the RECEIVING flag to reflect our new state.
-        */
-       .begin
-               .reg temp
-               .reg eop
-
-               #if CHANNEL_FLAG_RECEIVING != 1
-               #error CHANNEL_FLAG_RECEIVING is not 1
-               #endif
-
-               alu_shf[temp, 1, and, $rsw0, >>15]
-               alu[temp, temp, xor, CHANNEL_FLAGS]
-               alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
-               beq[abort_proterr#]
-
-               alu_shf[eop, 1, and, $rsw0, >>14]
-               alu[CHANNEL_FLAGS, temp, xor, eop]
-       .end
-
-       /*
-        * Copy the mpacket into the right spot, and in case of EOP,
-        * write back the descriptor and pass the packet on.
-        */
-       .begin
-               .reg buffer_offset
-               .reg _packet_length
-               .reg _packet_checksum
-               .reg _buffer_handle
-               .reg _buffer_start
-               .reg _buffer_length
-
-               /*
-                * Determine buffer_offset, _packet_length and
-                * _packet_checksum.
-                */
-               .begin
-                       .reg temp
-
-                       alu[--, 1, and, $rsw0, >>15]
-                       beq[not_sop#]
-
-                       immed[PACKET_LENGTH, 0]
-                       immed[PACKET_CHECKSUM, 0]
-
-               not_sop#:
-                       alu[buffer_offset, --, b, PACKET_LENGTH]
-                       alu_shf[temp, 0xff, and, $rsw0, >>16]
-                       alu[_packet_length, buffer_offset, +, temp]
-                       alu[PACKET_LENGTH, --, b, _packet_length]
-
-                       immed[temp, 0xffff]
-                       alu[temp, $rsw1, and, temp]
-                       alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
-                       alu[PACKET_CHECKSUM, --, b, _packet_checksum]
-               .end
-
-               /*
-                * Allocate buffer in case of SOP.
-                */
-               .begin
-                       .reg temp
-
-                       alu[temp, 1, and, $rsw0, >>15]
-                       beq[skip_buffer_alloc#]
-
-                       .begin
-                               .sig zzz
-                               .reg read $stemp $stemp2
-                               .xfer_order $stemp $stemp2
-
-                       rx_nobufs#:
-                               scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
-                               alu[_buffer_handle, --, b, $stemp]
-                               beq[rx_nobufs#]
-
-                               sram[read, $stemp, _buffer_handle, 0, 2],
-                                                               ctx_swap[zzz]
-                               alu[_buffer_start, --, b, $stemp]
-                               alu[_buffer_length, --, b, $stemp2]
-                       .end
-
-               skip_buffer_alloc#:
-               .end
-
-               /*
-                * Resynchronise.
-                */
-               .begin
-                       ctx_arb[sig2]
-                       local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
-               .end
-
-               /*
-                * Synchronise buffer state.
-                */
-               .begin
-                       .reg temp
-
-                       alu[temp, 1, and, $rsw0, >>15]
-                       beq[copy_from_local_mem#]
-
-                       alu[BUFFER_HANDLE, --, b, _buffer_handle]
-                       alu[BUFFER_START, --, b, _buffer_start]
-                       alu[BUFFER_LENGTH, --, b, _buffer_length]
-                       br[sync_state_done#]
-
-               copy_from_local_mem#:
-                       alu[_buffer_handle, --, b, BUFFER_HANDLE]
-                       alu[_buffer_start, --, b, BUFFER_START]
-                       alu[_buffer_length, --, b, BUFFER_LENGTH]
-
-               sync_state_done#:
-               .end
-
-#if 0
-               /*
-                * Debug buffer state management.
-                */
-               .begin
-                       .reg temp
-
-                       alu[temp, 1, and, $rsw0, >>14]
-                       beq[no_poison#]
-                       immed[BUFFER_HANDLE, 0xdead]
-                       immed[BUFFER_START, 0xdead]
-                       immed[BUFFER_LENGTH, 0xdead]
-               no_poison#:
-
-                       immed[temp, 0xdead]
-                       alu[--, _buffer_handle, -, temp]
-                       beq[state_corrupted#]
-                       alu[--, _buffer_start, -, temp]
-                       beq[state_corrupted#]
-                       alu[--, _buffer_length, -, temp]
-                       beq[state_corrupted#]
-               .end
-#endif
-
-               /*
-                * Check buffer length.
-                */
-               .begin
-                       alu[--, _buffer_length, -, _packet_length]
-                       blo[buffer_overflow#]
-               .end
-
-               /*
-                * Copy the mpacket and give back the RBUF element.
-                */
-               .begin
-                       .reg element
-                       .reg xfer_size
-                       .reg temp
-                       .sig copy_sig
-
-                       alu_shf[element, 0x7f, and, $rsw0, >>24]
-                       alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
-
-                       alu[xfer_size, xfer_size, -, 1]
-                       alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
-                       alu_shf[temp, 0x10, or, xfer_size, <<21]
-                       alu_shf[temp, temp, or, element, <<11]
-                       alu_shf[--, temp, or, 1, <<18]
-
-                       dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
-                                               indirect_ref, sig_done[copy_sig]
-                       ctx_arb[copy_sig]
-
-                       alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
-                       msf[fast_wr, --, temp, 0]
-               .end
-
-               /*
-                * If EOP, write back the packet descriptor.
-                */
-               .begin
-                       .reg write $stemp $stemp2
-                       .xfer_order $stemp $stemp2
-                       .sig zzz
-
-                       alu_shf[--, 1, and, $rsw0, >>14]
-                       beq[no_writeback#]
-
-                       alu[$stemp, $rsw0, and, 0x1f]
-                       alu[$stemp2, --, b, _packet_length]
-                       sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
-
-               no_writeback#:
-               .end
-
-               /*
-                * Resynchronise.
-                */
-               .begin
-                       ctx_arb[sig3]
-                       local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
-               .end
-
-               /*
-                * If EOP, put the buffer back onto the scratch ring.
-                */
-               .begin
-                       .reg write $stemp
-                       .sig zzz
-
-                       br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
-
-                       alu_shf[--, 1, and, $rsw0, >>14]
-                       beq[mpacket_receive_loop#]
-
-                       alu[--, 1, and, $rsw0, >>10]
-                       bne[rxerr#]
-
-                       alu[$stemp, --, b, _buffer_handle]
-                       scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
-                       cap[fast_wr, 0, XSCALE_INT_A]
-                       br[mpacket_receive_loop#]
-
-               rxerr#:
-                       alu[$stemp, --, b, _buffer_handle]
-                       scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
-                       br[mpacket_receive_loop#]
-               .end
-       .end
-
-
-abort_rswerr#:
-       halt
-
-abort_proterr#:
-       halt
-
-state_corrupted#:
-       halt
-
-buffer_overflow#:
-       halt
-
-rx_done_ring_overflow#:
-       halt
-
-
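The header comment of this microcode describes the SRAM descriptor shared with the host; spelled out as C it looks like the sketch below (field names follow that comment and may differ slightly from the real definition in ixpdev_priv.h). The sram[write, ..., 8, 2] near the end is the EOP write-back of the last two fields:

        struct ixpdev_rx_desc {
                /* filled in by the host before the handle is queued on rx_pending */
                u32     buf_phys_addr;  /* DRAM buffer address, 8-byte aligned */
                u32     buf_length;     /* size of that buffer */

                /* written back by the microengine at EOP (SRAM offsets 8 and 12) */
                u32     channel;        /* channel the packet arrived on */
                u32     pkt_length;     /* reassembled packet length */
        };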
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
deleted file mode 100644 (file)
index e8aee2f..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-static struct ixp2000_uengine_code ixp2400_rx =
-{
-       .cpu_model_bitmask      = 0x000003fe,
-       .cpu_min_revision       = 0,
-       .cpu_max_revision       = 255,
-
-       .uengine_parameters     = IXP2000_UENGINE_8_CONTEXTS |
-                                 IXP2000_UENGINE_PRN_UPDATE_EVERY |
-                                 IXP2000_UENGINE_NN_FROM_PREVIOUS |
-                                 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
-                                 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
-                                 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
-
-       .initial_reg_values     = (struct ixp2000_reg_value []) {
-               { -1, -1 }
-       },
-
-       .num_insns              = 109,
-       .insns                  = (u8 []) {
-               0xf0, 0x00, 0x0c, 0xc0, 0x05,
-               0xf4, 0x44, 0x0c, 0x00, 0x05,
-               0xfc, 0x04, 0x4c, 0x00, 0x00,
-               0xf0, 0x00, 0x00, 0x3b, 0x00,
-               0xb4, 0x40, 0xf0, 0x3b, 0x1f,
-               0x8a, 0xc0, 0x50, 0x3e, 0x05,
-               0xb4, 0x40, 0xf0, 0x3b, 0x80,
-               0x9a, 0xe0, 0x00, 0x3e, 0x05,
-               0xf0, 0x00, 0x00, 0x07, 0x00,
-               0xd8, 0x05, 0xc0, 0x00, 0x11,
-               0xf0, 0x00, 0x00, 0x0f, 0x00,
-               0x91, 0xb0, 0x20, 0x0e, 0x00,
-               0xfc, 0x06, 0x60, 0x0b, 0x00,
-               0xf0, 0x00, 0x0c, 0x03, 0x00,
-               0xf0, 0x00, 0x0c, 0x03, 0x00,
-               0xf0, 0x00, 0x0c, 0x03, 0x00,
-               0xf0, 0x00, 0x0c, 0x02, 0x00,
-               0xb0, 0xc0, 0x30, 0x0f, 0x01,
-               0xa4, 0x70, 0x00, 0x0f, 0x20,
-               0xd8, 0x02, 0xc0, 0x01, 0x00,
-               0xfc, 0x10, 0xac, 0x23, 0x08,
-               0xfc, 0x10, 0xac, 0x43, 0x10,
-               0xfc, 0x10, 0xac, 0x63, 0x18,
-               0xe0, 0x00, 0x00, 0x00, 0x02,
-               0xfc, 0x10, 0xae, 0x23, 0x88,
-               0x3d, 0x00, 0x04, 0x03, 0x20,
-               0xe0, 0x00, 0x00, 0x00, 0x10,
-               0x84, 0x82, 0x02, 0x01, 0x3b,
-               0xd8, 0x1a, 0x00, 0x01, 0x01,
-               0xb4, 0x00, 0x8c, 0x7d, 0x80,
-               0x91, 0xb0, 0x80, 0x22, 0x00,
-               0xfc, 0x06, 0x60, 0x23, 0x00,
-               0xf0, 0x00, 0x0c, 0x03, 0x00,
-               0xf0, 0x00, 0x0c, 0x03, 0x00,
-               0xf0, 0x00, 0x0c, 0x03, 0x00,
-               0x94, 0xf0, 0x92, 0x01, 0x21,
-               0xac, 0x40, 0x60, 0x26, 0x00,
-               0xa4, 0x30, 0x0c, 0x04, 0x06,
-               0xd8, 0x1a, 0x40, 0x01, 0x00,
-               0x94, 0xe0, 0xa2, 0x01, 0x21,
-               0xac, 0x20, 0x00, 0x28, 0x06,
-               0x84, 0xf2, 0x02, 0x01, 0x21,
-               0xd8, 0x0b, 0x40, 0x01, 0x00,
-               0xf0, 0x00, 0x0c, 0x02, 0x01,
-               0xf0, 0x00, 0x0c, 0x02, 0x02,
-               0xa0, 0x00, 0x08, 0x04, 0x00,
-               0x95, 0x00, 0xc6, 0x01, 0xff,
-               0xa0, 0x80, 0x10, 0x30, 0x00,
-               0xa0, 0x60, 0x1c, 0x00, 0x01,
-               0xf0, 0x0f, 0xf0, 0x33, 0xff,
-               0xb4, 0x00, 0xc0, 0x31, 0x81,
-               0xb0, 0x80, 0xb0, 0x32, 0x02,
-               0xa0, 0x20, 0x20, 0x2c, 0x00,
-               0x94, 0xf0, 0xd2, 0x01, 0x21,
-               0xd8, 0x0f, 0x40, 0x01, 0x00,
-               0x19, 0x40, 0x10, 0x04, 0x20,
-               0xa0, 0x00, 0x26, 0x04, 0x00,
-               0xd8, 0x0d, 0xc0, 0x01, 0x00,
-               0x00, 0x42, 0x10, 0x80, 0x02,
-               0xb0, 0x00, 0x46, 0x04, 0x00,
-               0xb0, 0x00, 0x56, 0x08, 0x00,
-               0xe0, 0x00, 0x00, 0x00, 0x04,
-               0xfc, 0x10, 0xae, 0x43, 0x90,
-               0x84, 0xf0, 0x32, 0x01, 0x21,
-               0xd8, 0x11, 0x40, 0x01, 0x00,
-               0xa0, 0x60, 0x3c, 0x00, 0x02,
-               0xa0, 0x20, 0x40, 0x10, 0x00,
-               0xa0, 0x20, 0x50, 0x14, 0x00,
-               0xd8, 0x12, 0x00, 0x00, 0x18,
-               0xa0, 0x00, 0x28, 0x0c, 0x00,
-               0xb0, 0x00, 0x48, 0x10, 0x00,
-               0xb0, 0x00, 0x58, 0x14, 0x00,
-               0xaa, 0xf0, 0x00, 0x14, 0x01,
-               0xd8, 0x1a, 0xc0, 0x01, 0x05,
-               0x85, 0x80, 0x42, 0x01, 0xff,
-               0x95, 0x00, 0x66, 0x01, 0xff,
-               0xba, 0xc0, 0x60, 0x1b, 0x01,
-               0x9a, 0x30, 0x60, 0x19, 0x30,
-               0x9a, 0xb0, 0x70, 0x1a, 0x30,
-               0x9b, 0x50, 0x78, 0x1e, 0x04,
-               0x8a, 0xe2, 0x08, 0x1e, 0x21,
-               0x6a, 0x4e, 0x00, 0x13, 0x00,
-               0xe0, 0x00, 0x00, 0x00, 0x30,
-               0x9b, 0x00, 0x7a, 0x92, 0x04,
-               0x3d, 0x00, 0x04, 0x1f, 0x20,
-               0x84, 0xe2, 0x02, 0x01, 0x21,
-               0xd8, 0x16, 0x80, 0x01, 0x00,
-               0xa4, 0x18, 0x0c, 0x7d, 0x80,
-               0xa0, 0x58, 0x1c, 0x00, 0x01,
-               0x01, 0x42, 0x00, 0xa0, 0x02,
-               0xe0, 0x00, 0x00, 0x00, 0x08,
-               0xfc, 0x10, 0xae, 0x63, 0x98,
-               0xd8, 0x1b, 0x00, 0xc2, 0x14,
-               0x84, 0xe2, 0x02, 0x01, 0x21,
-               0xd8, 0x05, 0xc0, 0x01, 0x00,
-               0x84, 0xa2, 0x02, 0x01, 0x21,
-               0xd8, 0x19, 0x40, 0x01, 0x01,
-               0xa0, 0x58, 0x0c, 0x00, 0x02,
-               0x1a, 0x40, 0x00, 0x04, 0x24,
-               0x33, 0x00, 0x01, 0x2f, 0x20,
-               0xd8, 0x05, 0xc0, 0x00, 0x18,
-               0xa0, 0x58, 0x0c, 0x00, 0x02,
-               0x1a, 0x40, 0x00, 0x04, 0x20,
-               0xd8, 0x05, 0xc0, 0x00, 0x18,
-               0xe0, 0x00, 0x02, 0x00, 0x00,
-               0xe0, 0x00, 0x02, 0x00, 0x00,
-               0xe0, 0x00, 0x02, 0x00, 0x00,
-               0xe0, 0x00, 0x02, 0x00, 0x00,
-               0xe0, 0x00, 0x02, 0x00, 0x00,
-       }
-};
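This .ucode file is the assembled counterpart of ixp2400_rx.uc: struct ixp2000_uengine_code (presumably declared in asm/hardware/uengine.h, which ixpdev.c includes) bundles the raw microwords with the CPU-revision mask and microengine configuration flags, and ixpdev.c pulls the blob in with a plain #include. Judging from the 109 five-byte rows above, each microword occupies 5 bytes (40 bits); a trivial, hypothetical size helper:

        static size_t uengine_code_size(const struct ixp2000_uengine_code *c)
        {
                return (size_t)c->num_insns * 5;        /* 109 * 5 == 545 bytes here */
        }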
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
deleted file mode 100644 (file)
index d090d18..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * TX ucode for the Intel IXP2400 in POS-PHY mode.
- * Copyright (C) 2004, 2005 Lennert Buytenhek
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Assumptions made in this code:
- * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
- *   only one TBUF partition is used.  This includes, for example,
- *   1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
- *   is not an exhaustive list.)
- * - The TBUF uses 64-byte mpackets.
- * - TX descriptors reside in SRAM, and have the following format:
- *     struct tx_desc
- *     {
- *     // to uengine
- *             u32     buf_phys_addr;
- *             u32     pkt_length;
- *             u32     channel;
- *     };
- * - Packet data resides in DRAM.
- * - Packet buffer addresses are 8-byte aligned.
- * - Scratch ring 2 is tx_pending.
- * - Scratch ring 3 is tx_done, and has status condition 'full'.
- * - This code is run on all eight threads of the microengine it runs on.
- */
-
-#define TX_SEQUENCE_0          0x0060
-#define TBUF_CTRL              0x1800
-
-#define PARTITION_SIZE         128
-#define PARTITION_THRESH       96
-
-
-       .sig volatile sig1
-       .sig volatile sig2
-       .sig volatile sig3
-
-       .reg @old_tx_seq_0
-       .reg @mpkts_in_flight
-       .reg @next_tbuf_mpacket
-
-       .reg @buffer_handle
-       .reg @buffer_start
-       .reg @packet_length
-       .reg @channel
-       .reg @packet_offset
-
-       .reg zero
-
-       immed[zero, 0]
-
-       /*
-        * Skip context 0 initialisation?
-        */
-       .begin
-               br!=ctx[0, mpacket_tx_loop#]
-       .end
-
-       /*
-        * Wait until all pending TBUF elements have been transmitted.
-        */
-       .begin
-               .reg read $tx
-               .sig zzz
-
-       loop_empty#:
-               msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
-               alu_shf[--, --, b, $tx, >>31]
-               beq[loop_empty#]
-
-               alu[@old_tx_seq_0, --, b, $tx]
-       .end
-
-       immed[@mpkts_in_flight, 0]
-       alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
-
-       immed[@buffer_handle, 0]
-
-       /*
-        * Initialise signal pipeline.
-        */
-       .begin
-               local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
-               .set_sig sig1
-
-               local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
-               .set_sig sig2
-
-               local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
-               .set_sig sig3
-       .end
-
-mpacket_tx_loop#:
-       .begin
-               .reg tbuf_element_index
-               .reg buffer_handle
-               .reg sop_eop
-               .reg packet_data
-               .reg channel
-               .reg mpacket_size
-
-               /*
-                * If there is no packet currently being transmitted,
-                * dequeue the next TX descriptor, and fetch the buffer
-                * address, packet length and destination channel number.
-                */
-               .begin
-                       .reg read $stemp $stemp2 $stemp3
-                       .xfer_order $stemp $stemp2 $stemp3
-                       .sig zzz
-
-                       ctx_arb[sig1]
-
-                       alu[--, --, b, @buffer_handle]
-                       bne[already_got_packet#]
-
-               tx_nobufs#:
-                       scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
-                       alu[@buffer_handle, --, b, $stemp]
-                       beq[tx_nobufs#]
-
-                       sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
-                       alu[@buffer_start, --, b, $stemp]
-                       alu[@packet_length, --, b, $stemp2]
-                       beq[zero_byte_packet#]
-                       alu[@channel, --, b, $stemp3]
-                       immed[@packet_offset, 0]
-
-               already_got_packet#:
-                       local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
-               .end
-
-               /*
-                * Determine tbuf element index, SOP/EOP flags, mpacket
-                * offset and mpacket size and cache buffer_handle and
-                * channel number.
-                */
-               .begin
-                       alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
-                       alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
-                       alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
-                                                       (PARTITION_SIZE - 1)]
-
-                       alu[buffer_handle, --, b, @buffer_handle]
-                       immed[@buffer_handle, 0]
-
-                       immed[sop_eop, 1]
-
-                       alu[packet_data, --, b, @packet_offset]
-                       bne[no_sop#]
-                       alu[sop_eop, sop_eop, or, 2]
-               no_sop#:
-                       alu[packet_data, packet_data, +, @buffer_start]
-
-                       alu[channel, --, b, @channel]
-
-                       alu[mpacket_size, @packet_length, -, @packet_offset]
-                       alu[--, 64, -, mpacket_size]
-                       bhs[eop#]
-                       alu[@buffer_handle, --, b, buffer_handle]
-                       immed[mpacket_size, 64]
-                       alu[sop_eop, sop_eop, and, 2]
-               eop#:
-
-                       alu[@packet_offset, @packet_offset, +, mpacket_size]
-               .end
-
-               /*
-                * Wait until there's enough space in the TBUF.
-                */
-               .begin
-                       .reg read $tx
-                       .reg temp
-                       .sig zzz
-
-                       ctx_arb[sig2]
-
-                       br[test_space#]
-
-               loop_space#:
-                       msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
-
-                       alu[temp, $tx, -, @old_tx_seq_0]
-                       alu[temp, temp, and, 0xff]
-                       alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
-
-                       alu[@old_tx_seq_0, --, b, $tx]
-
-               test_space#:
-                       alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
-                       blo[loop_space#]
-
-                       alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
-
-                       local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
-               .end
-
-               /*
-                * Copy the packet data to the TBUF.
-                */
-               .begin
-                       .reg temp
-                       .sig copy_sig
-
-                       alu[temp, mpacket_size, -, 1]
-                       alu_shf[temp, 0x10, or, temp, >>3]
-                       alu_shf[temp, 0x10, or, temp, <<21]
-                       alu_shf[temp, temp, or, tbuf_element_index, <<11]
-                       alu_shf[--, temp, or, 1, <<18]
-
-                       dram[tbuf_wr, --, packet_data, 0, max_8],
-                                       indirect_ref, sig_done[copy_sig]
-                       ctx_arb[copy_sig]
-               .end
-
-               /*
-                * Mark TBUF element as ready-to-be-transmitted.
-                */
-               .begin
-                       .reg write $tsw $tsw2
-                       .xfer_order $tsw $tsw2
-                       .reg temp
-                       .sig zzz
-
-                       alu_shf[temp, channel, or, mpacket_size, <<24]
-                       alu_shf[$tsw, temp, or, sop_eop, <<8]
-                       immed[$tsw2, 0]
-
-                       immed[temp, TBUF_CTRL]
-                       alu_shf[temp, temp, or, tbuf_element_index, <<3]
-                       msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
-               .end
-
-               /*
-                * Resynchronise.
-                */
-               .begin
-                       ctx_arb[sig3]
-                       local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
-               .end
-
-               /*
-                * If this was an EOP mpacket, recycle the TX buffer
-                * and signal the host.
-                */
-               .begin
-                       .reg write $stemp
-                       .sig zzz
-
-                       alu[--, sop_eop, and, 1]
-                       beq[mpacket_tx_loop#]
-
-               tx_done_ring_full#:
-                       br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
-
-                       alu[$stemp, --, b, buffer_handle]
-                       scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
-                       cap[fast_wr, 0, XSCALE_INT_A]
-                       br[mpacket_tx_loop#]
-               .end
-       .end
-
-
-zero_byte_packet#:
-       halt
-
-
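As with the RX side, the TX descriptor described in this file's header comment can be sketched in C as below (field names follow the comment; the real definition lived in ixpdev_priv.h). Note the field order differs from the RX descriptor, and there is no write-back: completion is signalled by putting the buffer handle on the tx_done scratch ring instead:

        struct ixpdev_tx_desc {
                /* filled in by the host before the handle is queued on tx_pending */
                u32     buf_phys_addr;  /* DRAM address of the packet data, 8-byte aligned */
                u32     pkt_length;     /* bytes to send, split into 64-byte TBUF mpackets */
                u32     channel;        /* destination channel number */
        };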
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
deleted file mode 100644 (file)
index a433e24..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-static struct ixp2000_uengine_code ixp2400_tx =
-{
-       .cpu_model_bitmask      = 0x000003fe,
-       .cpu_min_revision       = 0,
-       .cpu_max_revision       = 255,
-
-       .uengine_parameters     = IXP2000_UENGINE_8_CONTEXTS |
-                                 IXP2000_UENGINE_PRN_UPDATE_EVERY |
-                                 IXP2000_UENGINE_NN_FROM_PREVIOUS |
-                                 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
-                                 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
-                                 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
-
-       .initial_reg_values     = (struct ixp2000_reg_value []) {
-               { -1, -1 }
-       },
-
-       .num_insns              = 77,
-       .insns                  = (u8 []) {
-               0xf0, 0x00, 0x00, 0x07, 0x00,
-               0xd8, 0x03, 0x00, 0x00, 0x11,
-               0x3c, 0x40, 0x00, 0x04, 0xe0,
-               0x81, 0xf2, 0x02, 0x01, 0x00,
-               0xd8, 0x00, 0x80, 0x01, 0x00,
-               0xb0, 0x08, 0x06, 0x00, 0x00,
-               0xf0, 0x00, 0x0c, 0x00, 0x80,
-               0xb4, 0x49, 0x02, 0x03, 0x7f,
-               0xf0, 0x00, 0x02, 0x83, 0x00,
-               0xfc, 0x10, 0xac, 0x23, 0x08,
-               0xfc, 0x10, 0xac, 0x43, 0x10,
-               0xfc, 0x10, 0xac, 0x63, 0x18,
-               0xe0, 0x00, 0x00, 0x00, 0x02,
-               0xa0, 0x30, 0x02, 0x80, 0x00,
-               0xd8, 0x06, 0x00, 0x01, 0x01,
-               0x19, 0x40, 0x00, 0x04, 0x28,
-               0xb0, 0x0a, 0x06, 0x00, 0x00,
-               0xd8, 0x03, 0xc0, 0x01, 0x00,
-               0x00, 0x44, 0x00, 0x80, 0x80,
-               0xa0, 0x09, 0x06, 0x00, 0x00,
-               0xb0, 0x0b, 0x06, 0x04, 0x00,
-               0xd8, 0x13, 0x00, 0x01, 0x00,
-               0xb0, 0x0c, 0x06, 0x08, 0x00,
-               0xf0, 0x00, 0x0c, 0x00, 0xa0,
-               0xfc, 0x10, 0xae, 0x23, 0x88,
-               0xa0, 0x00, 0x12, 0x40, 0x00,
-               0xb0, 0xc9, 0x02, 0x43, 0x01,
-               0xb4, 0x49, 0x02, 0x43, 0x7f,
-               0xb0, 0x00, 0x22, 0x80, 0x00,
-               0xf0, 0x00, 0x02, 0x83, 0x00,
-               0xf0, 0x00, 0x0c, 0x04, 0x02,
-               0xb0, 0x40, 0x6c, 0x00, 0xa0,
-               0xd8, 0x08, 0x80, 0x01, 0x01,
-               0xaa, 0x00, 0x2c, 0x08, 0x02,
-               0xa0, 0xc0, 0x30, 0x18, 0x90,
-               0xa0, 0x00, 0x43, 0x00, 0x00,
-               0xba, 0xc0, 0x32, 0xc0, 0xa0,
-               0xaa, 0xb0, 0x00, 0x0f, 0x40,
-               0xd8, 0x0a, 0x80, 0x01, 0x04,
-               0xb0, 0x0a, 0x00, 0x08, 0x00,
-               0xf0, 0x00, 0x00, 0x0f, 0x40,
-               0xa4, 0x00, 0x2c, 0x08, 0x02,
-               0xa0, 0x8a, 0x00, 0x0c, 0xa0,
-               0xe0, 0x00, 0x00, 0x00, 0x04,
-               0xd8, 0x0c, 0x80, 0x00, 0x18,
-               0x3c, 0x40, 0x00, 0x04, 0xe0,
-               0xba, 0x80, 0x42, 0x01, 0x80,
-               0xb4, 0x40, 0x40, 0x13, 0xff,
-               0xaa, 0x88, 0x00, 0x10, 0x80,
-               0xb0, 0x08, 0x06, 0x00, 0x00,
-               0xaa, 0xf0, 0x0d, 0x80, 0x80,
-               0xd8, 0x0b, 0x40, 0x01, 0x05,
-               0xa0, 0x88, 0x0c, 0x04, 0x80,
-               0xfc, 0x10, 0xae, 0x43, 0x90,
-               0xba, 0xc0, 0x50, 0x0f, 0x01,
-               0x9a, 0x30, 0x50, 0x15, 0x30,
-               0x9a, 0xb0, 0x50, 0x16, 0x30,
-               0x9b, 0x50, 0x58, 0x16, 0x01,
-               0x8a, 0xe2, 0x08, 0x16, 0x21,
-               0x6b, 0x4e, 0x00, 0x83, 0x03,
-               0xe0, 0x00, 0x00, 0x00, 0x30,
-               0x9a, 0x80, 0x70, 0x0e, 0x04,
-               0x8b, 0x88, 0x08, 0x1e, 0x02,
-               0xf0, 0x00, 0x0c, 0x01, 0x81,
-               0xf0, 0x01, 0x80, 0x1f, 0x00,
-               0x9b, 0xd0, 0x78, 0x1e, 0x01,
-               0x3d, 0x42, 0x00, 0x1c, 0x20,
-               0xe0, 0x00, 0x00, 0x00, 0x08,
-               0xfc, 0x10, 0xae, 0x63, 0x98,
-               0xa4, 0x30, 0x0c, 0x04, 0x02,
-               0xd8, 0x03, 0x00, 0x01, 0x00,
-               0xd8, 0x11, 0xc1, 0x42, 0x14,
-               0xa0, 0x18, 0x00, 0x08, 0x00,
-               0x1a, 0x40, 0x00, 0x04, 0x2c,
-               0x33, 0x00, 0x01, 0x2f, 0x20,
-               0xd8, 0x03, 0x00, 0x00, 0x18,
-               0xe0, 0x00, 0x02, 0x00, 0x00,
-       }
-};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
deleted file mode 100644 (file)
index 4500837..0000000
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
-#include <linux/gfp.h>
-#include <asm/hardware/uengine.h>
-#include <asm/io.h>
-#include "ixp2400_rx.ucode"
-#include "ixp2400_tx.ucode"
-#include "ixpdev_priv.h"
-#include "ixpdev.h"
-#include "pm3386.h"
-
-#define DRV_MODULE_VERSION     "0.2"
-
-static int nds_count;
-static struct net_device **nds;
-static int nds_open;
-static void (*set_port_admin_status)(int port, int up);
-
-static struct ixpdev_rx_desc * const rx_desc =
-       (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
-static struct ixpdev_tx_desc * const tx_desc =
-       (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
-static int tx_pointer;
-
-
-static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct ixpdev_priv *ip = netdev_priv(dev);
-       struct ixpdev_tx_desc *desc;
-       int entry;
-       unsigned long flags;
-
-       if (unlikely(skb->len > PAGE_SIZE)) {
-               /* @@@ Count drops.  */
-               dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       entry = tx_pointer;
-       tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
-
-       desc = tx_desc + entry;
-       desc->pkt_length = skb->len;
-       desc->channel = ip->channel;
-
-       skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
-       dev_kfree_skb(skb);
-
-       ixp2000_reg_write(RING_TX_PENDING,
-               TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
-
-       local_irq_save(flags);
-       ip->tx_queue_entries++;
-       if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
-               netif_stop_queue(dev);
-       local_irq_restore(flags);
-
-       return NETDEV_TX_OK;
-}
-
-
-static int ixpdev_rx(struct net_device *dev, int processed, int budget)
-{
-       while (processed < budget) {
-               struct ixpdev_rx_desc *desc;
-               struct sk_buff *skb;
-               void *buf;
-               u32 _desc;
-
-               _desc = ixp2000_reg_read(RING_RX_DONE);
-               if (_desc == 0)
-                       return 0;
-
-               desc = rx_desc +
-                       ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
-               buf = phys_to_virt(desc->buf_addr);
-
-               if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
-                       printk(KERN_ERR "ixp2000: rx err, length %d\n",
-                                       desc->pkt_length);
-                       goto err;
-               }
-
-               if (desc->channel < 0 || desc->channel >= nds_count) {
-                       printk(KERN_ERR "ixp2000: rx err, channel %d\n",
-                                       desc->channel);
-                       goto err;
-               }
-
-               /* @@@ Make FCS stripping configurable.  */
-               desc->pkt_length -= 4;
-
-               if (unlikely(!netif_running(nds[desc->channel])))
-                       goto err;
-
-               skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
-               if (likely(skb != NULL)) {
-                       skb_copy_to_linear_data(skb, buf, desc->pkt_length);
-                       skb_put(skb, desc->pkt_length);
-                       skb->protocol = eth_type_trans(skb, nds[desc->channel]);
-
-                       netif_receive_skb(skb);
-               }
-
-err:
-               ixp2000_reg_write(RING_RX_PENDING, _desc);
-               processed++;
-       }
-
-       return processed;
-}
-
-/* dev always points to nds[0].  */
-static int ixpdev_poll(struct napi_struct *napi, int budget)
-{
-       struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
-       struct net_device *dev = ip->dev;
-       int rx;
-
-       rx = 0;
-       do {
-               ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
-
-               rx = ixpdev_rx(dev, rx, budget);
-               if (rx >= budget)
-                       break;
-       } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
-
-       napi_complete(napi);
-       ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
-
-       return rx;
-}
-
-static void ixpdev_tx_complete(void)
-{
-       int channel;
-       u32 wake;
-
-       wake = 0;
-       while (1) {
-               struct ixpdev_priv *ip;
-               u32 desc;
-               int entry;
-
-               desc = ixp2000_reg_read(RING_TX_DONE);
-               if (desc == 0)
-                       break;
-
-               /* @@@ Check whether entries come back in order.  */
-               entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
-               channel = tx_desc[entry].channel;
-
-               if (channel < 0 || channel >= nds_count) {
-                       printk(KERN_ERR "ixp2000: txcomp channel index "
-                                       "out of bounds (%d, %.8i, %d)\n",
-                                       channel, (unsigned int)desc, entry);
-                       continue;
-               }
-
-               ip = netdev_priv(nds[channel]);
-               if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
-                       wake |= 1 << channel;
-               ip->tx_queue_entries--;
-       }
-
-       for (channel = 0; wake != 0; channel++) {
-               if (wake & (1 << channel)) {
-                       netif_wake_queue(nds[channel]);
-                       wake &= ~(1 << channel);
-               }
-       }
-}
-
-static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
-{
-       u32 status;
-
-       status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
-       if (status == 0)
-               return IRQ_NONE;
-
-       /*
-        * Any of the eight receive units signaled RX?
-        */
-       if (status & 0x00ff) {
-               struct net_device *dev = nds[0];
-               struct ixpdev_priv *ip = netdev_priv(dev);
-
-               ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
-               if (likely(napi_schedule_prep(&ip->napi))) {
-                       __napi_schedule(&ip->napi);
-               } else {
-                       printk(KERN_CRIT "ixp2000: irq while polling!!\n");
-               }
-       }
-
-       /*
-        * Any of the eight transmit units signaled TXdone?
-        */
-       if (status & 0xff00) {
-               ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
-               ixpdev_tx_complete();
-       }
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ixpdev_poll_controller(struct net_device *dev)
-{
-       disable_irq(IRQ_IXP2000_THDA0);
-       ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
-       enable_irq(IRQ_IXP2000_THDA0);
-}
-#endif
-
-static int ixpdev_open(struct net_device *dev)
-{
-       struct ixpdev_priv *ip = netdev_priv(dev);
-       int err;
-
-       napi_enable(&ip->napi);
-       if (!nds_open++) {
-               err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
-                                       IRQF_SHARED, "ixp2000_eth", nds);
-               if (err) {
-                       nds_open--;
-                       napi_disable(&ip->napi);
-                       return err;
-               }
-
-               ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
-       }
-
-       set_port_admin_status(ip->channel, 1);
-       netif_start_queue(dev);
-
-       return 0;
-}
-
-static int ixpdev_close(struct net_device *dev)
-{
-       struct ixpdev_priv *ip = netdev_priv(dev);
-
-       netif_stop_queue(dev);
-       napi_disable(&ip->napi);
-       set_port_admin_status(ip->channel, 0);
-
-       if (!--nds_open) {
-               ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
-               free_irq(IRQ_IXP2000_THDA0, nds);
-       }
-
-       return 0;
-}
-
-static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
-{
-       struct ixpdev_priv *ip = netdev_priv(dev);
-
-       pm3386_get_stats(ip->channel, &(dev->stats));
-
-       return &(dev->stats);
-}
-
-static const struct net_device_ops ixpdev_netdev_ops = {
-       .ndo_open               = ixpdev_open,
-       .ndo_stop               = ixpdev_close,
-       .ndo_start_xmit         = ixpdev_xmit,
-       .ndo_change_mtu         = eth_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_get_stats          = ixpdev_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixpdev_poll_controller,
-#endif
-};
-
-struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
-{
-       struct net_device *dev;
-       struct ixpdev_priv *ip;
-
-       dev = alloc_etherdev(sizeof_priv);
-       if (dev == NULL)
-               return NULL;
-
-       dev->netdev_ops = &ixpdev_netdev_ops;
-
-       dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
-       ip = netdev_priv(dev);
-       ip->dev = dev;
-       netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
-       ip->channel = channel;
-       ip->tx_queue_entries = 0;
-
-       return dev;
-}
-
-int ixpdev_init(int __nds_count, struct net_device **__nds,
-               void (*__set_port_admin_status)(int port, int up))
-{
-       int i;
-       int err;
-
-       BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
-
-       printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
-
-       nds_count = __nds_count;
-       nds = __nds;
-       set_port_admin_status = __set_port_admin_status;
-
-       for (i = 0; i < RX_BUF_COUNT; i++) {
-               void *buf;
-
-               buf = (void *)get_zeroed_page(GFP_KERNEL);
-               if (buf == NULL) {
-                       err = -ENOMEM;
-                       while (--i >= 0)
-                               free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-                       goto err_out;
-               }
-               rx_desc[i].buf_addr = virt_to_phys(buf);
-               rx_desc[i].buf_length = PAGE_SIZE;
-       }
-
-       /* @@@ Maybe we shouldn't be preallocating TX buffers.  */
-       for (i = 0; i < TX_BUF_COUNT; i++) {
-               void *buf;
-
-               buf = (void *)get_zeroed_page(GFP_KERNEL);
-               if (buf == NULL) {
-                       err = -ENOMEM;
-                       while (--i >= 0)
-                               free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-                       goto err_free_rx;
-               }
-               tx_desc[i].buf_addr = virt_to_phys(buf);
-       }
-
-       /* 256 entries, ring status set means 'empty', base address 0x0000.  */
-       ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
-       ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
-       ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
-
-       /* 256 entries, ring status set means 'full', base address 0x0400.  */
-       ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
-       ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
-       ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
-
-       for (i = 0; i < RX_BUF_COUNT; i++) {
-               ixp2000_reg_write(RING_RX_PENDING,
-                       RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
-       }
-
-       ixp2000_uengine_load(0, &ixp2400_rx);
-       ixp2000_uengine_start_contexts(0, 0xff);
-
-       /* 256 entries, ring status set means 'empty', base address 0x0800.  */
-       ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
-       ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
-       ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
-
-       /* 256 entries, ring status set means 'full', base address 0x0c00.  */
-       ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
-       ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
-       ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
-
-       ixp2000_uengine_load(1, &ixp2400_tx);
-       ixp2000_uengine_start_contexts(1, 0xff);
-
-       for (i = 0; i < nds_count; i++) {
-               err = register_netdev(nds[i]);
-               if (err) {
-                       while (--i >= 0)
-                               unregister_netdev(nds[i]);
-                       goto err_free_tx;
-               }
-       }
-
-       for (i = 0; i < nds_count; i++) {
-               printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
-                                nds[i]->name, i, nds[i]->dev_addr);
-       }
-
-       return 0;
-
-err_free_tx:
-       for (i = 0; i < TX_BUF_COUNT; i++)
-               free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-
-err_free_rx:
-       for (i = 0; i < RX_BUF_COUNT; i++)
-               free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-
-err_out:
-       return err;
-} 
-
-void ixpdev_deinit(void)
-{
-       int i;
-
-       /* @@@ Flush out pending packets.  */
-
-       for (i = 0; i < nds_count; i++)
-               unregister_netdev(nds[i]);
-
-       ixp2000_uengine_stop_contexts(1, 0xff);
-       ixp2000_uengine_stop_contexts(0, 0xff);
-       ixp2000_uengine_reset(0x3);
-
-       for (i = 0; i < TX_BUF_COUNT; i++)
-               free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-
-       for (i = 0; i < RX_BUF_COUNT; i++)
-               free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
deleted file mode 100644 (file)
index 391ece6..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __IXPDEV_H
-#define __IXPDEV_H
-
-struct ixpdev_priv
-{
-       struct net_device *dev;
-       struct napi_struct napi;
-       int     channel;
-       int     tx_queue_entries;
-};
-
-struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
-int ixpdev_init(int num_ports, struct net_device **nds,
-               void (*set_port_admin_status)(int port, int up));
-void ixpdev_deinit(void);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
deleted file mode 100644 (file)
index 86aa08e..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __IXPDEV_PRIV_H
-#define __IXPDEV_PRIV_H
-
-#define RX_BUF_DESC_BASE       0x00001000
-#define RX_BUF_COUNT           ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
-#define TX_BUF_DESC_BASE       0x00002000
-#define TX_BUF_COUNT           ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
-#define TX_BUF_COUNT_PER_CHAN  (TX_BUF_COUNT / 4)
-
-#define RING_RX_PENDING                ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
-#define RING_RX_DONE           ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
-#define RING_TX_PENDING                ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
-#define RING_TX_DONE           ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
-
-#define SCRATCH_REG(x)         ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
-#define RING_RX_PENDING_BASE   SCRATCH_REG(0x00)
-#define RING_RX_PENDING_HEAD   SCRATCH_REG(0x04)
-#define RING_RX_PENDING_TAIL   SCRATCH_REG(0x08)
-#define RING_RX_DONE_BASE      SCRATCH_REG(0x10)
-#define RING_RX_DONE_HEAD      SCRATCH_REG(0x14)
-#define RING_RX_DONE_TAIL      SCRATCH_REG(0x18)
-#define RING_TX_PENDING_BASE   SCRATCH_REG(0x20)
-#define RING_TX_PENDING_HEAD   SCRATCH_REG(0x24)
-#define RING_TX_PENDING_TAIL   SCRATCH_REG(0x28)
-#define RING_TX_DONE_BASE      SCRATCH_REG(0x30)
-#define RING_TX_DONE_HEAD      SCRATCH_REG(0x34)
-#define RING_TX_DONE_TAIL      SCRATCH_REG(0x38)
-
-struct ixpdev_rx_desc
-{
-       u32     buf_addr;
-       u32     buf_length;
-       u32     channel;
-       u32     pkt_length;
-};
-
-struct ixpdev_tx_desc
-{
-       u32     buf_addr;
-       u32     pkt_length;
-       u32     channel;
-       u32     unused;
-};
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
deleted file mode 100644 (file)
index e08d3f9..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Helper functions for the PM3386s on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <asm/io.h>
-#include "pm3386.h"
-
-/*
- * Read from register 'reg' of PM3386 device 'pm'.
- */
-static u16 pm3386_reg_read(int pm, int reg)
-{
-       void *_reg;
-       u16 value;
-
-       _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
-       if (pm == 1)
-               _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
-
-       value = *((volatile u16 *)(_reg + (reg << 1)));
-
-//     printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
-
-       return value;
-}
-
-/*
- * Write to register 'reg' of PM3386 device 'pm', and perform
- * a readback from the identification register.
- */
-static void pm3386_reg_write(int pm, int reg, u16 value)
-{
-       void *_reg;
-       u16 dummy;
-
-//     printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
-
-       _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
-       if (pm == 1)
-               _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
-
-       *((volatile u16 *)(_reg + (reg << 1))) = value;
-
-       dummy = *((volatile u16 *)_reg);
-       __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
-}
-
-/*
- * Read from port 'port' register 'reg', where the registers
- * for the different ports are 'spacing' registers apart.
- */
-static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
-{
-       int reg;
-
-       reg = _reg;
-       if (port & 1)
-               reg += spacing;
-
-       return pm3386_reg_read(port >> 1, reg);
-}
-
-/*
- * Write to port 'port' register 'reg', where the registers
- * for the different ports are 'spacing' registers apart.
- */
-static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
-{
-       int reg;
-
-       reg = _reg;
-       if (port & 1)
-               reg += spacing;
-
-       pm3386_reg_write(port >> 1, reg, value);
-}
-
-int pm3386_secondary_present(void)
-{
-       return pm3386_reg_read(1, 0) == 0x3386;
-}
-
-void pm3386_reset(void)
-{
-       u8 mac[3][6];
-       int secondary;
-
-       secondary = pm3386_secondary_present();
-
-       /* Save programmed MAC addresses.  */
-       pm3386_get_mac(0, mac[0]);
-       pm3386_get_mac(1, mac[1]);
-       if (secondary)
-               pm3386_get_mac(2, mac[2]);
-
-       /* Assert analog and digital reset.  */
-       pm3386_reg_write(0, 0x002, 0x0060);
-       if (secondary)
-               pm3386_reg_write(1, 0x002, 0x0060);
-       mdelay(1);
-
-       /* Deassert analog reset.  */
-       pm3386_reg_write(0, 0x002, 0x0062);
-       if (secondary)
-               pm3386_reg_write(1, 0x002, 0x0062);
-       mdelay(10);
-
-       /* Deassert digital reset.  */
-       pm3386_reg_write(0, 0x002, 0x0063);
-       if (secondary)
-               pm3386_reg_write(1, 0x002, 0x0063);
-       mdelay(10);
-
-       /* Restore programmed MAC addresses.  */
-       pm3386_set_mac(0, mac[0]);
-       pm3386_set_mac(1, mac[1]);
-       if (secondary)
-               pm3386_set_mac(2, mac[2]);
-
-       /* Disable carrier on all ports.  */
-       pm3386_set_carrier(0, 0);
-       pm3386_set_carrier(1, 0);
-       if (secondary)
-               pm3386_set_carrier(2, 0);
-}
-
-static u16 swaph(u16 x)
-{
-       return ((x << 8) | (x >> 8)) & 0xffff;
-}
-
-int pm3386_port_count(void)
-{
-       return 2 + pm3386_secondary_present();
-}
-
-void pm3386_init_port(int port)
-{
-       int pm = port >> 1;
-
-       /*
-        * Work around ENP2611 bootloader programming MAC address
-        * in reverse.
-        */
-       if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
-           (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
-               u16 temp[3];
-
-               temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
-               temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
-               temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
-               pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
-               pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
-               pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
-       }
-
-       /*
-        * Initialise narrowbanding mode.  See application note 2010486
-        * for more information.  (@@@ We also need to issue a reset
-        * when ROOL or DOOL are detected.)
-        */
-       pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
-       udelay(500);
-       pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
-
-       /*
-        * SPI-3 ingress block.  Set 64 bytes SPI-3 burst size
-        * towards SPI-3 bridge.
-        */
-       pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
-
-       /*
-        * Enable ingress protocol checking, and soft reset the
-        * SPI-3 ingress block.
-        */
-       pm3386_reg_write(pm, 0x103, 0x0003);
-       while (!(pm3386_reg_read(pm, 0x103) & 0x80))
-               ;
-
-       /*
-        * SPI-3 egress block.  Gather 12288 bytes of the current
-        * packet in the TX fifo before initiating transmit on the
-        * SERDES interface.  (Prevents TX underflows.)
-        */
-       pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
-
-       /*
-        * Enforce odd parity from the SPI-3 bridge, and soft reset
-        * the SPI-3 egress block.
-        */
-       pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
-       while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
-               ;
-
-       /*
-        * EGMAC block.  Set this channels to reject long preambles,
-        * not send or transmit PAUSE frames, enable preamble checking,
-        * disable frame length checking, enable FCS appending, enable
-        * TX frame padding.
-        */
-       pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
-
-       /*
-        * Soft reset the EGMAC block.
-        */
-       pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
-       pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
-
-       /*
-        * Auto-sense autonegotiation status.
-        */
-       pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
-
-       /*
-        * Allow reception of jumbo frames.
-        */
-       pm3386_port_reg_write(port, 0x310, 0x100, 9018);
-
-       /*
-        * Allow transmission of jumbo frames.
-        */
-       pm3386_port_reg_write(port, 0x336, 0x100, 9018);
-
-       /* @@@ Should set 0x337/0x437 (RX forwarding threshold.)  */
-
-       /*
-        * Set autonegotiation parameters to 'no PAUSE, full duplex.'
-        */
-       pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
-
-       /*
-        * Enable and restart autonegotiation.
-        */
-       pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
-       pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
-}
-
-void pm3386_get_mac(int port, u8 *mac)
-{
-       u16 temp;
-
-       temp = pm3386_port_reg_read(port, 0x308, 0x100);
-       mac[0] = temp & 0xff;
-       mac[1] = (temp >> 8) & 0xff;
-
-       temp = pm3386_port_reg_read(port, 0x309, 0x100);
-       mac[2] = temp & 0xff;
-       mac[3] = (temp >> 8) & 0xff;
-
-       temp = pm3386_port_reg_read(port, 0x30a, 0x100);
-       mac[4] = temp & 0xff;
-       mac[5] = (temp >> 8) & 0xff;
-}
-
-void pm3386_set_mac(int port, u8 *mac)
-{
-       pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
-       pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
-       pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
-}
-
-static u32 pm3386_get_stat(int port, u16 base)
-{
-       u32 value;
-
-       value = pm3386_port_reg_read(port, base, 0x100);
-       value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
-
-       return value;
-}
-
-void pm3386_get_stats(int port, struct net_device_stats *stats)
-{
-       /*
-        * Snapshot statistics counters.
-        */
-       pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
-       while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
-               ;
-
-       memset(stats, 0, sizeof(*stats));
-
-       stats->rx_packets = pm3386_get_stat(port, 0x510);
-       stats->tx_packets = pm3386_get_stat(port, 0x590);
-       stats->rx_bytes = pm3386_get_stat(port, 0x514);
-       stats->tx_bytes = pm3386_get_stat(port, 0x594);
-       /* @@@ Add other stats.  */
-}
-
-void pm3386_set_carrier(int port, int state)
-{
-       pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
-}
-
-int pm3386_is_link_up(int port)
-{
-       u16 temp;
-
-       temp = pm3386_port_reg_read(port, 0x31a, 0x100);
-       temp = pm3386_port_reg_read(port, 0x31a, 0x100);
-
-       return !!(temp & 0x0002);
-}
-
-void pm3386_enable_rx(int port)
-{
-       u16 temp;
-
-       temp = pm3386_port_reg_read(port, 0x303, 0x100);
-       temp |= 0x1000;
-       pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_disable_rx(int port)
-{
-       u16 temp;
-
-       temp = pm3386_port_reg_read(port, 0x303, 0x100);
-       temp &= 0xefff;
-       pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_enable_tx(int port)
-{
-       u16 temp;
-
-       temp = pm3386_port_reg_read(port, 0x303, 0x100);
-       temp |= 0x4000;
-       pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_disable_tx(int port)
-{
-       u16 temp;
-
-       temp = pm3386_port_reg_read(port, 0x303, 0x100);
-       temp &= 0xbfff;
-       pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
deleted file mode 100644 (file)
index cc4183d..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Helper functions for the PM3386s on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __PM3386_H
-#define __PM3386_H
-
-void pm3386_reset(void);
-int pm3386_port_count(void);
-void pm3386_init_port(int port);
-void pm3386_get_mac(int port, u8 *mac);
-void pm3386_set_mac(int port, u8 *mac);
-void pm3386_get_stats(int port, struct net_device_stats *stats);
-void pm3386_set_carrier(int port, int state);
-int pm3386_is_link_up(int port);
-void pm3386_enable_rx(int port);
-void pm3386_disable_rx(int port);
-void pm3386_enable_tx(int port);
-void pm3386_disable_tx(int port);
-
-
-#endif
index 41a8b5a9849e00f8e03cfa1c541f2134e3af6557..482648fcf0b6327bb9c482c1c36880ece1627caa 100644 (file)
@@ -1002,12 +1002,41 @@ static int ixp4xx_nway_reset(struct net_device *dev)
        return phy_start_aneg(port->phydev);
 }
 
+int ixp46x_phc_index = -1;
+
+static int ixp4xx_get_ts_info(struct net_device *dev,
+                             struct ethtool_ts_info *info)
+{
+       if (!cpu_is_ixp46x()) {
+               info->so_timestamping =
+                       SOF_TIMESTAMPING_TX_SOFTWARE |
+                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+               return 0;
+       }
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_HARDWARE |
+               SOF_TIMESTAMPING_RX_HARDWARE |
+               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->phc_index = ixp46x_phc_index;
+       info->tx_types =
+               (1 << HWTSTAMP_TX_OFF) |
+               (1 << HWTSTAMP_TX_ON);
+       info->rx_filters =
+               (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
+       return 0;
+}
+
 static const struct ethtool_ops ixp4xx_ethtool_ops = {
        .get_drvinfo = ixp4xx_get_drvinfo,
        .get_settings = ixp4xx_get_settings,
        .set_settings = ixp4xx_set_settings,
        .nway_reset = ixp4xx_nway_reset,
        .get_link = ethtool_op_get_link,
+       .get_ts_info = ixp4xx_get_ts_info,
 };
 
 
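The get_ts_info hook added above reports the port's timestamping capabilities and PHC index through the ethtool ETHTOOL_GET_TS_INFO command. As a minimal userspace sketch (not part of this commit; "eth0" is only a placeholder, and a kernel/headers new enough to define ETHTOOL_GET_TS_INFO is assumed), the capability could be queried roughly like this:

/* Sketch: query timestamping capabilities of one interface via ethtool. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("phc_index=%d so_timestamping=0x%x\n",
		       info.phc_index, info.so_timestamping);
	close(fd);
	return 0;
}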
index 168c8f41d09f534986977a3d673663b74e433cdf..b6a2bdeff59521c84ed1f56eec393b943ca67c6c 100644 (file)
@@ -113,10 +113,9 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-       if (pci_request_regions(pdev, "rrunner")) {
-               ret = -EIO;
+       ret = pci_request_regions(pdev, "rrunner");
+       if (ret < 0)
                goto out;
-       }
 
        pci_set_drvdata(pdev, dev);
 
@@ -124,11 +123,8 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
 
        spin_lock_init(&rrpriv->lock);
 
-       dev->irq = pdev->irq;
        dev->netdev_ops = &rr_netdev_ops;
 
-       dev->base_addr = pci_resource_start(pdev, 0);
-
        /* display version info if adapter is found */
        if (!version_disp) {
                /* set display flag to TRUE so that */
@@ -146,16 +142,14 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
        pci_set_master(pdev);
 
        printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
-              "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
-              dev->base_addr, dev->irq, pci_latency);
+              "at 0x%08llx, irq %i, PCI latency %i\n", dev->name,
+              pci_resource_start(pdev, 0), pdev->irq, pci_latency);
 
        /*
-        * Remap the regs into kernel space.
+        * Remap the MMIO regs into kernel space.
         */
-
-       rrpriv->regs = ioremap(dev->base_addr, 0x1000);
-
-       if (!rrpriv->regs){
+       rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
+       if (!rrpriv->regs) {
                printk(KERN_ERR "%s:  Unable to map I/O register, "
                        "RoadRunner will be disabled.\n", dev->name);
                ret = -EIO;
@@ -202,8 +196,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
 
        rr_init(dev);
 
-       dev->base_addr = 0;
-
        ret = register_netdev(dev);
        if (ret)
                goto out;
@@ -217,7 +209,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
                pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
                                    rrpriv->tx_ring_dma);
        if (rrpriv->regs)
-               iounmap(rrpriv->regs);
+               pci_iounmap(pdev, rrpriv->regs);
        if (pdev) {
                pci_release_regions(pdev);
                pci_set_drvdata(pdev, NULL);
@@ -231,29 +223,26 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
 static void __devexit rr_remove_one (struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rr_private *rr = netdev_priv(dev);
 
-       if (dev) {
-               struct rr_private *rr = netdev_priv(dev);
-
-               if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
-                       printk(KERN_ERR "%s: trying to unload running NIC\n",
-                              dev->name);
-                       writel(HALT_NIC, &rr->regs->HostCtrl);
-               }
-
-               pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
-                                   rr->evt_ring_dma);
-               pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
-                                   rr->rx_ring_dma);
-               pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
-                                   rr->tx_ring_dma);
-               unregister_netdev(dev);
-               iounmap(rr->regs);
-               free_netdev(dev);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
+       if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
+               printk(KERN_ERR "%s: trying to unload running NIC\n",
+                      dev->name);
+               writel(HALT_NIC, &rr->regs->HostCtrl);
        }
+
+       unregister_netdev(dev);
+       pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
+                           rr->evt_ring_dma);
+       pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
+                           rr->rx_ring_dma);
+       pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
+                           rr->tx_ring_dma);
+       pci_iounmap(pdev, rr->regs);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       free_netdev(dev);
 }
 
 
@@ -1229,9 +1218,9 @@ static int rr_open(struct net_device *dev)
        readl(&regs->HostCtrl);
        spin_unlock_irqrestore(&rrpriv->lock, flags);
 
-       if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
+       if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
                printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
-                      dev->name, dev->irq);
+                      dev->name, pdev->irq);
                ecode = -EAGAIN;
                goto error;
        }
@@ -1338,16 +1327,15 @@ static void rr_dump(struct net_device *dev)
 
 static int rr_close(struct net_device *dev)
 {
-       struct rr_private *rrpriv;
-       struct rr_regs __iomem *regs;
+       struct rr_private *rrpriv = netdev_priv(dev);
+       struct rr_regs __iomem *regs = rrpriv->regs;
+       struct pci_dev *pdev = rrpriv->pci_dev;
        unsigned long flags;
        u32 tmp;
        short i;
 
        netif_stop_queue(dev);
 
-       rrpriv = netdev_priv(dev);
-       regs = rrpriv->regs;
 
        /*
         * Lock to make sure we are not cleaning up while another CPU
@@ -1386,15 +1374,15 @@ static int rr_close(struct net_device *dev)
        rr_raz_tx(rrpriv, dev);
        rr_raz_rx(rrpriv, dev);
 
-       pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
+       pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
                            rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
        rrpriv->rx_ctrl = NULL;
 
-       pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
-                           rrpriv->info, rrpriv->info_dma);
+       pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
+                           rrpriv->info_dma);
        rrpriv->info = NULL;
 
-       free_irq(dev->irq, dev);
+       free_irq(pdev->irq, dev);
        spin_unlock_irqrestore(&rrpriv->lock, flags);
 
        return 0;
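The rrunner hunks above drop the deprecated dev->base_addr/dev->irq bookkeeping in favour of the PCI core's own resources and switch from ioremap() to the pci_iomap() helpers. A bare sketch of that pairing, with the driver specifics stripped out (function names here are illustrative only):

/* Sketch: map/unmap BAR 0 via the PCI iomap helpers instead of ioremap(). */
#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *map_bar0(struct pci_dev *pdev)
{
	/* Map at most 0x1000 bytes of BAR 0; the PCI core already knows
	 * the physical address, so no dev->base_addr copy is needed. */
	return pci_iomap(pdev, 0, 0x1000);
}

static void unmap_bar0(struct pci_dev *pdev, void __iomem *regs)
{
	pci_iounmap(pdev, regs);	/* pairs with pci_iomap() */
}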
index d025c83cd12a09fb4f701abce880f92aaed47b05..8b919471472fb1dba4d34ffcf0bfe5b4c723af7b 100644 (file)
@@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device)
        return 0;
 }
 
+
+#define RING_AVAIL_PERCENT_HIWATER 20
+#define RING_AVAIL_PERCENT_LOWATER 10
+
+/*
+ * Get the percentage of available bytes to write in the ring.
+ * The return value is in range from 0 to 100.
+ */
+static inline u32 hv_ringbuf_avail_percent(
+               struct hv_ring_buffer_info *ring_info)
+{
+       u32 avail_read, avail_write;
+
+       hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+
+       return avail_write * 100 / ring_info->ring_datasize;
+}
+
 static void netvsc_send_completion(struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
 {
@@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device,
                complete(&net_device->channel_init_wait);
        } else if (nvsp_packet->hdr.msg_type ==
                   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
+               int num_outstanding_sends;
+
                /* Get the send context */
                nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
                        packet->trans_id;
@@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device,
                nvsc_packet->completion.send.send_completion(
                        nvsc_packet->completion.send.send_completion_ctx);
 
-               atomic_dec(&net_device->num_outstanding_sends);
+               num_outstanding_sends =
+                       atomic_dec_return(&net_device->num_outstanding_sends);
 
-               if (netif_queue_stopped(ndev) && !net_device->start_remove)
-                       netif_wake_queue(ndev);
+               if (netif_queue_stopped(ndev) && !net_device->start_remove &&
+                       (hv_ringbuf_avail_percent(&device->channel->outbound)
+                       > RING_AVAIL_PERCENT_HIWATER ||
+                       num_outstanding_sends < 1))
+                               netif_wake_queue(ndev);
        } else {
                netdev_err(ndev, "Unknown send completion packet type- "
                           "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device,
 
        if (ret == 0) {
                atomic_inc(&net_device->num_outstanding_sends);
+               if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+                       RING_AVAIL_PERCENT_LOWATER) {
+                       netif_stop_queue(ndev);
+                       if (atomic_read(&net_device->
+                               num_outstanding_sends) < 1)
+                               netif_wake_queue(ndev);
+               }
        } else if (ret == -EAGAIN) {
                netif_stop_queue(ndev);
-               if (atomic_read(&net_device->num_outstanding_sends) < 1)
+               if (atomic_read(&net_device->num_outstanding_sends) < 1) {
                        netif_wake_queue(ndev);
+                       ret = -ENOSPC;
+               }
        } else {
                netdev_err(ndev, "Unable to send packet %p ret %d\n",
                           packet, ret);
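The netvsc hunks above add watermark-based flow control on the VMBus outbound ring: the transmit queue is stopped when less than RING_AVAIL_PERCENT_LOWATER (10%) of the ring is free and woken again once more than RING_AVAIL_PERCENT_HIWATER (20%) is free or no sends remain outstanding. A self-contained sketch of that hysteresis with the ring and netdev abstracted away (the helper names and numbers below are illustrative):

/* Hysteresis sketch only; the real code reads the VMBus ring through
 * hv_get_ringbuffer_availbytes() and stops/wakes the netdev queue. */
#include <stdbool.h>
#include <stdio.h>

#define HIWATER_PCT 20	/* wake the queue above this */
#define LOWATER_PCT 10	/* stop the queue below this */

static bool queue_stopped;

static void after_send(unsigned int avail, unsigned int size)
{
	unsigned int pct = avail * 100 / size;

	if (pct < LOWATER_PCT)
		queue_stopped = true;
}

static void after_completion(unsigned int avail, unsigned int size,
			     int outstanding)
{
	unsigned int pct = avail * 100 / size;

	if (queue_stopped && (pct > HIWATER_PCT || outstanding < 1))
		queue_stopped = false;
}

int main(void)
{
	after_send(50, 1024);		/* ~4% free  -> queue stops  */
	after_completion(300, 1024, 3);	/* ~29% free -> queue wakes */
	printf("stopped=%d\n", queue_stopped);
	return 0;
}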
index dd294783b5c5b77c3d48ecd14067921d9e90d4bb..a0cc12786be441c9f1cabad455834c329b669929 100644 (file)
@@ -224,9 +224,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                net->stats.tx_packets++;
        } else {
                kfree(packet);
+               if (ret != -EAGAIN) {
+                       dev_kfree_skb_any(skb);
+                       net->stats.tx_dropped++;
+               }
        }
 
-       return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+       return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
 }
 
 /*
index f975afdc315ce507b5a5b80c04325a224849c320..b17fc900709997863ce064b0e007f3014ae2d291 100644 (file)
@@ -773,7 +773,8 @@ static int macvlan_fill_info(struct sk_buff *skb,
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
-       NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
+       if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
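The macvlan hunk above is part of the move away from the NLA_PUT_U32() macro, which hid a jump to nla_put_failure, to an explicit nla_put_u32() call with visible error handling. The resulting fill pattern looks roughly like this sketch (the wrapper function name is illustrative; the attribute comes from the hunk above):

/* Sketch of the explicit netlink attribute-put pattern. */
#include <net/netlink.h>
#include <linux/if_link.h>

static int example_fill_mode(struct sk_buff *skb, u32 mode)
{
	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, mode))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;	/* message ran out of tailroom */
}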
index e16f98cb4f04733c962aa9219d407c3066a4ef64..cd802eb25fd2f898de3e566d410d1656d5b68d33 100644 (file)
@@ -39,10 +39,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
                MII_BCM63XX_IR_SPEED |
                MII_BCM63XX_IR_LINK) |
                MII_BCM63XX_IR_EN;
-       err = phy_write(phydev, MII_BCM63XX_IR, reg);
-       if (err < 0)
-               return err;
-       return 0;
+       return phy_write(phydev, MII_BCM63XX_IR, reg);
 }
 
 static int bcm63xx_ack_interrupt(struct phy_device *phydev)
index 2f774acdb55192f0df5086e6d2a51ef193bf3cfc..5f59cc0647786209916201356d716c3568a1ac14 100644 (file)
@@ -134,12 +134,7 @@ static int dm9161_config_init(struct phy_device *phydev)
                return err;
 
        /* Reconnect the PHY, and enable Autonegotiation */
-       err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
-
-       if (err < 0)
-               return err;
-
-       return 0;
+       return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
 }
 
 static int dm9161_ack_interrupt(struct phy_device *phydev)
index dd7ae19579d178eaddc51ddc3654fcbafc132a79..940b29022d0cef97a6e5181803439610e1cfc7cb 100644 (file)
@@ -1215,6 +1215,36 @@ static void dp83640_txtstamp(struct phy_device *phydev,
        }
 }
 
+static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
+{
+       struct dp83640_private *dp83640 = dev->priv;
+
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_HARDWARE |
+               SOF_TIMESTAMPING_RX_HARDWARE |
+               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
+       info->tx_types =
+               (1 << HWTSTAMP_TX_OFF) |
+               (1 << HWTSTAMP_TX_ON) |
+               (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+       info->rx_filters =
+               (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+       return 0;
+}
+
 static struct phy_driver dp83640_driver = {
        .phy_id         = DP83640_PHY_ID,
        .phy_id_mask    = 0xfffffff0,
@@ -1225,6 +1255,7 @@ static struct phy_driver dp83640_driver = {
        .remove         = dp83640_remove,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
+       .ts_info        = dp83640_ts_info,
        .hwtstamp       = dp83640_hwtstamp,
        .rxtstamp       = dp83640_rxtstamp,
        .txtstamp       = dp83640_txtstamp,
index e8b9c53c304b63d2ba2b4a530504e5fafb08f6dc..418928d644bfffa4698c000fe13d71e67bcc2d6d 100644 (file)
@@ -455,11 +455,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-       if (err < 0)
-               return err;
-
-       return 0;
+       return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -515,11 +511,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-       if (err < 0)
-               return err;
-
-       return 0;
+       return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1149_config_init(struct phy_device *phydev)
@@ -545,11 +537,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-       if (err < 0)
-               return err;
-
-       return 0;
+       return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1145_config_init(struct phy_device *phydev)
index 885dbdd9c39e0f9ea1101d7a5642fc52d6a081a7..72b50f57e7b28df4a0be367c219dc2cee3022739 100644 (file)
@@ -116,8 +116,8 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
        int i;
 
        rcu_read_lock();
-       for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
-            i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
+       i = 1;
+       for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
                sock = rcu_dereference(callid_sock[i]);
                if (!sock)
                        continue;
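The pptp hunk above swaps an open-coded find_next_bit() loop for for_each_set_bit_from(), which keeps iterating over set bits starting from the current value of the index variable. A trimmed-down sketch of the two equivalent forms (the bitmap and its size here are arbitrary, not taken from pptp):

/* Equivalence sketch only; MAX_ID and id_bitmap are illustrative. */
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/printk.h>

#define MAX_ID 4096
static DECLARE_BITMAP(id_bitmap, MAX_ID);

static void walk_ids(void)
{
	int i;

	/* Old form: explicit find_next_bit() stepping, starting at bit 1. */
	for (i = find_next_bit(id_bitmap, MAX_ID, 1); i < MAX_ID;
	     i = find_next_bit(id_bitmap, MAX_ID, i + 1))
		pr_debug("set bit %d\n", i);

	/* New form: seed the index, let the helper do the stepping. */
	i = 1;
	for_each_set_bit_from(i, id_bitmap, MAX_ID)
		pr_debug("set bit %d\n", i);
}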
index 248a144033cacd3df323d8014e259fd730ce8dfc..89024d5fc33a9d8f0ab8922127cc852ce1567962 100644 (file)
@@ -40,4 +40,15 @@ config NET_TEAM_MODE_ACTIVEBACKUP
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_activebackup.
 
+config NET_TEAM_MODE_LOADBALANCE
+       tristate "Load-balance mode support"
+       depends on NET_TEAM
+       ---help---
+         This mode provides load balancing functionality. Tx port selection
+         is done using BPF function set up from userspace (bpf_hash_func
+         option)
+
+         To compile this team mode as a module, choose M here: the module
+         will be called team_mode_loadbalance.
+
 endif # NET_TEAM
index 85f2028a87afb9d6442e8db74e6be156fbc632b7..fb9f4c1c51ff312fc2c7bca0fa8eda3f8a0ed063 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_NET_TEAM) += team.o
 obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
 obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
+obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
index 8f81805c6825d6e1aec84068bbf3f533e1c33582..153a62d03c9f9c4b2b3be4af17a5d98474d9de01 100644 (file)
@@ -65,7 +65,7 @@ static int __set_port_mac(struct net_device *port_dev,
        return dev_set_mac_address(port_dev, &addr);
 }
 
-int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_mac(struct team_port *port)
 {
        return __set_port_mac(port->dev, port->orig.dev_addr);
 }
@@ -76,12 +76,26 @@ int team_port_set_team_mac(struct team_port *port)
 }
 EXPORT_SYMBOL(team_port_set_team_mac);
 
+static void team_refresh_port_linkup(struct team_port *port)
+{
+       port->linkup = port->user.linkup_enabled ? port->user.linkup :
+                                                  port->state.linkup;
+}
 
 /*******************
  * Options handling
  *******************/
 
-struct team_option *__team_find_option(struct team *team, const char *opt_name)
+struct team_option_inst { /* One for each option instance */
+       struct list_head list;
+       struct team_option *option;
+       struct team_port *port; /* != NULL if per-port */
+       bool changed;
+       bool removed;
+};
+
+static struct team_option *__team_find_option(struct team *team,
+                                             const char *opt_name)
 {
        struct team_option *option;
 
@@ -92,9 +106,121 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
        return NULL;
 }
 
-int __team_options_register(struct team *team,
-                           const struct team_option *option,
-                           size_t option_count)
+static int __team_option_inst_add(struct team *team, struct team_option *option,
+                                 struct team_port *port)
+{
+       struct team_option_inst *opt_inst;
+
+       opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
+       if (!opt_inst)
+               return -ENOMEM;
+       opt_inst->option = option;
+       opt_inst->port = port;
+       opt_inst->changed = true;
+       opt_inst->removed = false;
+       list_add_tail(&opt_inst->list, &team->option_inst_list);
+       return 0;
+}
+
+static void __team_option_inst_del(struct team_option_inst *opt_inst)
+{
+       list_del(&opt_inst->list);
+       kfree(opt_inst);
+}
+
+static void __team_option_inst_del_option(struct team *team,
+                                         struct team_option *option)
+{
+       struct team_option_inst *opt_inst, *tmp;
+
+       list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
+               if (opt_inst->option == option)
+                       __team_option_inst_del(opt_inst);
+       }
+}
+
+static int __team_option_inst_add_option(struct team *team,
+                                        struct team_option *option)
+{
+       struct team_port *port;
+       int err;
+
+       if (!option->per_port)
+               return __team_option_inst_add(team, option, 0);
+
+       list_for_each_entry(port, &team->port_list, list) {
+               err = __team_option_inst_add(team, option, port);
+               if (err)
+                       goto inst_del_option;
+       }
+       return 0;
+
+inst_del_option:
+       __team_option_inst_del_option(team, option);
+       return err;
+}
+
+static void __team_option_inst_mark_removed_option(struct team *team,
+                                                  struct team_option *option)
+{
+       struct team_option_inst *opt_inst;
+
+       list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+               if (opt_inst->option == option) {
+                       opt_inst->changed = true;
+                       opt_inst->removed = true;
+               }
+       }
+}
+
+static void __team_option_inst_del_port(struct team *team,
+                                       struct team_port *port)
+{
+       struct team_option_inst *opt_inst, *tmp;
+
+       list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
+               if (opt_inst->option->per_port &&
+                   opt_inst->port == port)
+                       __team_option_inst_del(opt_inst);
+       }
+}
+
+static int __team_option_inst_add_port(struct team *team,
+                                      struct team_port *port)
+{
+       struct team_option *option;
+       int err;
+
+       list_for_each_entry(option, &team->option_list, list) {
+               if (!option->per_port)
+                       continue;
+               err = __team_option_inst_add(team, option, port);
+               if (err)
+                       goto inst_del_port;
+       }
+       return 0;
+
+inst_del_port:
+       __team_option_inst_del_port(team, port);
+       return err;
+}
+
+static void __team_option_inst_mark_removed_port(struct team *team,
+                                                struct team_port *port)
+{
+       struct team_option_inst *opt_inst;
+
+       list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+               if (opt_inst->port == port) {
+                       opt_inst->changed = true;
+                       opt_inst->removed = true;
+               }
+       }
+}
+
+static int __team_options_register(struct team *team,
+                                  const struct team_option *option,
+                                  size_t option_count)
 {
        int i;
        struct team_option **dst_opts;
@@ -107,26 +233,32 @@ int __team_options_register(struct team *team,
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
-                       goto rollback;
+                       goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
-                       goto rollback;
+                       goto alloc_rollback;
                }
        }
 
        for (i = 0; i < option_count; i++) {
-               dst_opts[i]->changed = true;
-               dst_opts[i]->removed = false;
+               err = __team_option_inst_add_option(team, dst_opts[i]);
+               if (err)
+                       goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }
 
        kfree(dst_opts);
        return 0;
 
-rollback:
-       for (i = 0; i < option_count; i++)
+inst_rollback:
+       for (i--; i >= 0; i--)
+               __team_option_inst_del_option(team, dst_opts[i]);
+
+       i = option_count - 1;
+alloc_rollback:
+       for (i--; i >= 0; i--)
                kfree(dst_opts[i]);
 
        kfree(dst_opts);
@@ -143,10 +275,8 @@ static void __team_options_mark_removed(struct team *team,
                struct team_option *del_opt;
 
                del_opt = __team_find_option(team, option->name);
-               if (del_opt) {
-                       del_opt->changed = true;
-                       del_opt->removed = true;
-               }
+               if (del_opt)
+                       __team_option_inst_mark_removed_option(team, del_opt);
        }
 }
 
@@ -161,6 +291,7 @@ static void __team_options_unregister(struct team *team,
 
                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
+                       __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
@@ -193,22 +324,42 @@ void team_options_unregister(struct team *team,
 }
 EXPORT_SYMBOL(team_options_unregister);
 
-static int team_option_get(struct team *team, struct team_option *option,
-                          void *arg)
+static int team_option_port_add(struct team *team, struct team_port *port)
 {
-       return option->getter(team, arg);
+       int err;
+
+       err = __team_option_inst_add_port(team, port);
+       if (err)
+               return err;
+       __team_options_change_check(team);
+       return 0;
 }
 
-static int team_option_set(struct team *team, struct team_option *option,
-                          void *arg)
+static void team_option_port_del(struct team *team, struct team_port *port)
+{
+       __team_option_inst_mark_removed_port(team, port);
+       __team_options_change_check(team);
+       __team_option_inst_del_port(team, port);
+}
+
+static int team_option_get(struct team *team,
+                          struct team_option_inst *opt_inst,
+                          struct team_gsetter_ctx *ctx)
+{
+       return opt_inst->option->getter(team, ctx);
+}
+
+static int team_option_set(struct team *team,
+                          struct team_option_inst *opt_inst,
+                          struct team_gsetter_ctx *ctx)
 {
        int err;
 
-       err = option->setter(team, arg);
+       err = opt_inst->option->setter(team, ctx);
        if (err)
                return err;
 
-       option->changed = true;
+       opt_inst->changed = true;
        __team_options_change_check(team);
        return err;
 }
@@ -642,6 +793,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_handler_register;
        }
 
+       err = team_option_port_add(team, port);
+       if (err) {
+               netdev_err(dev, "Device %s failed to add per-port options\n",
+                          portname);
+               goto err_option_port_add;
+       }
+
        team_port_list_add_port(team, port);
        team_adjust_ops(team);
        __team_compute_features(team);
@@ -651,6 +809,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 
        return 0;
 
+err_option_port_add:
+       netdev_rx_handler_unregister(port_dev);
+
 err_handler_register:
        netdev_set_master(port_dev, NULL);
 
@@ -690,6 +851,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        __team_port_change_check(port, false);
        team_port_list_del_port(team, port);
        team_adjust_ops(team);
+       team_option_port_del(team, port);
        netdev_rx_handler_unregister(port_dev);
        netdev_set_master(port_dev, NULL);
        vlan_vids_del_by_dev(port_dev, dev);
@@ -712,19 +874,49 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
 static const char team_no_mode_kind[] = "*NOMODE*";
 
-static int team_mode_option_get(struct team *team, void *arg)
+static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+       ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
+       return 0;
+}
+
+static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+       return team_change_mode(team, ctx->data.str_val);
+}
+
+static int team_user_linkup_option_get(struct team *team,
+                                      struct team_gsetter_ctx *ctx)
+{
+       ctx->data.bool_val = ctx->port->user.linkup;
+       return 0;
+}
+
+static int team_user_linkup_option_set(struct team *team,
+                                      struct team_gsetter_ctx *ctx)
+{
+       ctx->port->user.linkup = ctx->data.bool_val;
+       team_refresh_port_linkup(ctx->port);
+       return 0;
+}
+
+static int team_user_linkup_en_option_get(struct team *team,
+                                         struct team_gsetter_ctx *ctx)
 {
-       const char **str = arg;
+       struct team_port *port = ctx->port;
 
-       *str = team->mode ? team->mode->kind : team_no_mode_kind;
+       ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
 }
 
-static int team_mode_option_set(struct team *team, void *arg)
+static int team_user_linkup_en_option_set(struct team *team,
+                                         struct team_gsetter_ctx *ctx)
 {
-       const char **str = arg;
+       struct team_port *port = ctx->port;
 
-       return team_change_mode(team, *str);
+       port->user.linkup_enabled = ctx->data.bool_val;
+       team_refresh_port_linkup(ctx->port);
+       return 0;
 }
 
 static const struct team_option team_options[] = {
@@ -734,6 +926,20 @@ static const struct team_option team_options[] = {
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
+       {
+               .name = "user_linkup",
+               .type = TEAM_OPTION_TYPE_BOOL,
+               .per_port = true,
+               .getter = team_user_linkup_option_get,
+               .setter = team_user_linkup_option_set,
+       },
+       {
+               .name = "user_linkup_enabled",
+               .type = TEAM_OPTION_TYPE_BOOL,
+               .per_port = true,
+               .getter = team_user_linkup_en_option_get,
+               .setter = team_user_linkup_en_option_set,
+       },
 };
 
 static int team_init(struct net_device *dev)
@@ -756,6 +962,7 @@ static int team_init(struct net_device *dev)
        team_adjust_ops(team);
 
        INIT_LIST_HEAD(&team->option_list);
+       INIT_LIST_HEAD(&team->option_inst_list);
        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
        if (err)
                goto err_options_register;
@@ -1145,10 +1352,7 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
        },
        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
-       [TEAM_ATTR_OPTION_DATA] = {
-               .type = NLA_BINARY,
-               .len = TEAM_STRING_MAX_LEN,
-       },
+       [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
 };
 
 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
@@ -1241,46 +1445,86 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
 {
        struct nlattr *option_list;
        void *hdr;
-       struct team_option *option;
+       struct team_option_inst *opt_inst;
+       int err;
 
        hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
                          TEAM_CMD_OPTIONS_GET);
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);
 
-       NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+       if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+               goto nla_put_failure;
        option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
        if (!option_list)
                return -EMSGSIZE;
 
-       list_for_each_entry(option, &team->option_list, list) {
+       list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                struct nlattr *option_item;
-               long arg;
+               struct team_option *option = opt_inst->option;
+               struct team_gsetter_ctx ctx;
 
                /* Include only changed options if fill all mode is not on */
-               if (!fillall && !option->changed)
+               if (!fillall && !opt_inst->changed)
                        continue;
                option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
                if (!option_item)
                        goto nla_put_failure;
-               NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
-               if (option->changed) {
-                       NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
-                       option->changed = false;
+               if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
+                       goto nla_put_failure;
+               if (opt_inst->changed) {
+                       if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
+                               goto nla_put_failure;
+                       opt_inst->changed = false;
                }
-               if (option->removed)
-                       NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
+               if (opt_inst->removed &&
+                   nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
+                       goto nla_put_failure;
+               if (opt_inst->port &&
+                   nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
+                               opt_inst->port->dev->ifindex))
+                       goto nla_put_failure;
+               ctx.port = opt_inst->port;
                switch (option->type) {
                case TEAM_OPTION_TYPE_U32:
-                       NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
-                       team_option_get(team, option, &arg);
-                       NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+                       if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
+                               goto nla_put_failure;
+                       err = team_option_get(team, opt_inst, &ctx);
+                       if (err)
+                               goto errout;
+                       if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
+                                       ctx.data.u32_val))
+                               goto nla_put_failure;
                        break;
                case TEAM_OPTION_TYPE_STRING:
-                       NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
-                       team_option_get(team, option, &arg);
-                       NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
-                                      (char *) arg);
+                       if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
+                               goto nla_put_failure;
+                       err = team_option_get(team, opt_inst, &ctx);
+                       if (err)
+                               goto errout;
+                       if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
+                                          ctx.data.str_val))
+                               goto nla_put_failure;
+                       break;
+               case TEAM_OPTION_TYPE_BINARY:
+                       if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
+                               goto nla_put_failure;
+                       err = team_option_get(team, opt_inst, &ctx);
+                       if (err)
+                               goto errout;
+                       if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
+                                   ctx.data.bin_val.len, ctx.data.bin_val.ptr))
+                               goto nla_put_failure;
+                       break;
+               case TEAM_OPTION_TYPE_BOOL:
+                       if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
+                               goto nla_put_failure;
+                       err = team_option_get(team, opt_inst, &ctx);
+                       if (err)
+                               goto errout;
+                       if (ctx.data.bool_val &&
+                           nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
+                               goto nla_put_failure;
                        break;
                default:
                        BUG();
@@ -1292,8 +1536,10 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
        return genlmsg_end(skb, hdr);
 
 nla_put_failure:
+       err = -EMSGSIZE;
+errout:
        genlmsg_cancel(skb, hdr);
-       return -EMSGSIZE;
+       return err;
 }
 
 static int team_nl_fill_options_get_all(struct sk_buff *skb,
@@ -1339,9 +1585,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
        }
 
        nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
-               struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+               struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
+               struct nlattr *attr_port_ifindex;
+               struct nlattr *attr_data;
                enum team_option_type opt_type;
-               struct team_option *option;
+               int opt_port_ifindex = 0; /* != 0 for per-port options */
+               struct team_option_inst *opt_inst;
                char *opt_name;
                bool opt_found = false;
 
@@ -1349,48 +1598,78 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                        err = -EINVAL;
                        goto team_put;
                }
-               err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+               err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
                                       nl_option, team_nl_option_policy);
                if (err)
                        goto team_put;
-               if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
-                   !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
-                   !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+               if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
+                   !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
                        err = -EINVAL;
                        goto team_put;
                }
-               switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+               switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
                case NLA_U32:
                        opt_type = TEAM_OPTION_TYPE_U32;
                        break;
                case NLA_STRING:
                        opt_type = TEAM_OPTION_TYPE_STRING;
                        break;
+               case NLA_BINARY:
+                       opt_type = TEAM_OPTION_TYPE_BINARY;
+                       break;
+               case NLA_FLAG:
+                       opt_type = TEAM_OPTION_TYPE_BOOL;
+                       break;
                default:
                        goto team_put;
                }
 
-               opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
-               list_for_each_entry(option, &team->option_list, list) {
-                       long arg;
-                       struct nlattr *opt_data_attr;
+               attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
+               if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
+                       err = -EINVAL;
+                       goto team_put;
+               }
+
+               opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
+               attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
+               if (attr_port_ifindex)
+                       opt_port_ifindex = nla_get_u32(attr_port_ifindex);
+
+               list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+                       struct team_option *option = opt_inst->option;
+                       struct team_gsetter_ctx ctx;
+                       int tmp_ifindex;
 
+                       tmp_ifindex = opt_inst->port ?
+                                     opt_inst->port->dev->ifindex : 0;
                        if (option->type != opt_type ||
-                           strcmp(option->name, opt_name))
+                           strcmp(option->name, opt_name) ||
+                           tmp_ifindex != opt_port_ifindex)
                                continue;
                        opt_found = true;
-                       opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+                       ctx.port = opt_inst->port;
                        switch (opt_type) {
                        case TEAM_OPTION_TYPE_U32:
-                               arg = nla_get_u32(opt_data_attr);
+                               ctx.data.u32_val = nla_get_u32(attr_data);
                                break;
                        case TEAM_OPTION_TYPE_STRING:
-                               arg = (long) nla_data(opt_data_attr);
+                               if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+                                       err = -EINVAL;
+                                       goto team_put;
+                               }
+                               ctx.data.str_val = nla_data(attr_data);
+                               break;
+                       case TEAM_OPTION_TYPE_BINARY:
+                               ctx.data.bin_val.len = nla_len(attr_data);
+                               ctx.data.bin_val.ptr = nla_data(attr_data);
+                               break;
+                       case TEAM_OPTION_TYPE_BOOL:
+                               ctx.data.bool_val = attr_data ? true : false;
                                break;
                        default:
                                BUG();
                        }
-                       err = team_option_set(team, option, &arg);
+                       err = team_option_set(team, opt_inst, &ctx);
                        if (err)
                                goto team_put;
                }
@@ -1420,7 +1699,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);
 
-       NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+       if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+               goto nla_put_failure;
        port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
        if (!port_list)
                return -EMSGSIZE;
@@ -1434,17 +1714,20 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
                port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
                if (!port_item)
                        goto nla_put_failure;
-               NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+               if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
+                       goto nla_put_failure;
                if (port->changed) {
-                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+                       if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
+                               goto nla_put_failure;
                        port->changed = false;
                }
-               if (port->removed)
-                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
-               if (port->linkup)
-                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
-               NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
-               NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+               if ((port->removed &&
+                    nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
+                   (port->state.linkup &&
+                    nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
+                   nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
+                   nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
+                       goto nla_put_failure;
                nla_nest_end(skb, port_item);
        }
 
@@ -1603,23 +1886,24 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
 {
        int err;
 
-       if (!port->removed && port->linkup == linkup)
+       if (!port->removed && port->state.linkup == linkup)
                return;
 
        port->changed = true;
-       port->linkup = linkup;
+       port->state.linkup = linkup;
+       team_refresh_port_linkup(port);
        if (linkup) {
                struct ethtool_cmd ecmd;
 
                err = __ethtool_get_settings(port->dev, &ecmd);
                if (!err) {
-                       port->speed = ethtool_cmd_speed(&ecmd);
-                       port->duplex = ecmd.duplex;
+                       port->state.speed = ethtool_cmd_speed(&ecmd);
+                       port->state.duplex = ecmd.duplex;
                        goto send_event;
                }
        }
-       port->speed = 0;
-       port->duplex = 0;
+       port->state.speed = 0;
+       port->state.duplex = 0;
 
 send_event:
        err = team_nl_send_event_port_list_get(port->team);
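The team core changes above replace the untyped void *arg passed to option getters and setters with a typed struct team_gsetter_ctx, which carries the value in a union and, for per-port options, the port being addressed. A minimal standalone sketch of that pattern, with purely illustrative names rather than the kernel structures:

/* Userspace sketch of a typed getter/setter context, modelled on
 * struct team_gsetter_ctx: one union carries the value, an optional
 * per-port pointer identifies the instance being accessed. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_port {
	int ifindex;
	bool user_linkup;
};

struct demo_ctx {
	union {
		uint32_t u32_val;
		const char *str_val;
		bool bool_val;
	} data;
	struct demo_port *port;	/* NULL for team-wide options */
};

/* Per-port getter: reads the value for ctx->port only. */
static int user_linkup_get(struct demo_ctx *ctx)
{
	ctx->data.bool_val = ctx->port->user_linkup;
	return 0;
}

/* Per-port setter: writes the value for ctx->port only. */
static int user_linkup_set(struct demo_ctx *ctx)
{
	ctx->port->user_linkup = ctx->data.bool_val;
	return 0;
}

int main(void)
{
	struct demo_port port = { .ifindex = 3, .user_linkup = false };
	struct demo_ctx ctx = { .port = &port };

	ctx.data.bool_val = true;
	user_linkup_set(&ctx);
	user_linkup_get(&ctx);
	printf("port %d user_linkup=%d\n", port.ifindex, ctx.data.bool_val);
	return 0;
}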
index f4d960e82e2967561e16a397c44c89312a556222..fd6bd03aaa897b711ef89eda77db98b8395573ac 100644 (file)
@@ -59,23 +59,21 @@ static void ab_port_leave(struct team *team, struct team_port *port)
                RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
 }
 
-static int ab_active_port_get(struct team *team, void *arg)
+static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
-       u32 *ifindex = arg;
-
-       *ifindex = 0;
        if (ab_priv(team)->active_port)
-               *ifindex = ab_priv(team)->active_port->dev->ifindex;
+               ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
+       else
+               ctx->data.u32_val = 0;
        return 0;
 }
 
-static int ab_active_port_set(struct team *team, void *arg)
+static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
-       u32 *ifindex = arg;
        struct team_port *port;
 
-       list_for_each_entry_rcu(port, &team->port_list, list) {
-               if (port->dev->ifindex == *ifindex) {
+       list_for_each_entry(port, &team->port_list, list) {
+               if (port->dev->ifindex == ctx->data.u32_val) {
                        rcu_assign_pointer(ab_priv(team)->active_port, port);
                        return 0;
                }
@@ -92,12 +90,12 @@ static const struct team_option ab_options[] = {
        },
 };
 
-int ab_init(struct team *team)
+static int ab_init(struct team *team)
 {
        return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 }
 
-void ab_exit(struct team *team)
+static void ab_exit(struct team *team)
 {
        team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
 }
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
new file mode 100644 (file)
index 0000000..2b506b2
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
+ * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_team.h>
+
+struct lb_priv {
+       struct sk_filter __rcu *fp;
+       struct sock_fprog *orig_fprog;
+};
+
+static struct lb_priv *lb_priv(struct team *team)
+{
+       return (struct lb_priv *) &team->mode_priv;
+}
+
+static bool lb_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct sk_filter *fp;
+       struct team_port *port;
+       unsigned int hash;
+       int port_index;
+
+       fp = rcu_dereference(lb_priv(team)->fp);
+       if (unlikely(!fp))
+               goto drop;
+       hash = SK_RUN_FILTER(fp, skb);
+       port_index = hash % team->port_count;
+       port = team_get_port_by_index_rcu(team, port_index);
+       if (unlikely(!port))
+               goto drop;
+       skb->dev = port->dev;
+       if (dev_queue_xmit(skb))
+               return false;
+       return true;
+
+drop:
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+       if (!lb_priv(team)->orig_fprog) {
+               ctx->data.bin_val.len = 0;
+               ctx->data.bin_val.ptr = NULL;
+               return 0;
+       }
+       ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
+                               sizeof(struct sock_filter);
+       ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
+       return 0;
+}
+
+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+                         const void *data)
+{
+       struct sock_fprog *fprog;
+       struct sock_filter *filter = (struct sock_filter *) data;
+
+       if (data_len % sizeof(struct sock_filter))
+               return -EINVAL;
+       fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+       if (!fprog)
+               return -ENOMEM;
+       fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
+       if (!fprog->filter) {
+               kfree(fprog);
+               return -ENOMEM;
+       }
+       fprog->len = data_len / sizeof(struct sock_filter);
+       *pfprog = fprog;
+       return 0;
+}
+
+static void __fprog_destroy(struct sock_fprog *fprog)
+{
+       kfree(fprog->filter);
+       kfree(fprog);
+}
+
+static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+       struct sk_filter *fp = NULL;
+       struct sock_fprog *fprog = NULL;
+       int err;
+
+       if (ctx->data.bin_val.len) {
+               err = __fprog_create(&fprog, ctx->data.bin_val.len,
+                                    ctx->data.bin_val.ptr);
+               if (err)
+                       return err;
+               err = sk_unattached_filter_create(&fp, fprog);
+               if (err) {
+                       __fprog_destroy(fprog);
+                       return err;
+               }
+       }
+
+       if (lb_priv(team)->orig_fprog) {
+               /* Clear old filter data */
+               __fprog_destroy(lb_priv(team)->orig_fprog);
+               sk_unattached_filter_destroy(lb_priv(team)->fp);
+       }
+
+       rcu_assign_pointer(lb_priv(team)->fp, fp);
+       lb_priv(team)->orig_fprog = fprog;
+       return 0;
+}
+
+static const struct team_option lb_options[] = {
+       {
+               .name = "bpf_hash_func",
+               .type = TEAM_OPTION_TYPE_BINARY,
+               .getter = lb_bpf_func_get,
+               .setter = lb_bpf_func_set,
+       },
+};
+
+static int lb_init(struct team *team)
+{
+       return team_options_register(team, lb_options,
+                                    ARRAY_SIZE(lb_options));
+}
+
+static void lb_exit(struct team *team)
+{
+       team_options_unregister(team, lb_options,
+                               ARRAY_SIZE(lb_options));
+}
+
+static int lb_port_enter(struct team *team, struct team_port *port)
+{
+       return team_port_set_team_mac(port);
+}
+
+static void lb_port_change_mac(struct team *team, struct team_port *port)
+{
+       team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops lb_mode_ops = {
+       .init                   = lb_init,
+       .exit                   = lb_exit,
+       .transmit               = lb_transmit,
+       .port_enter             = lb_port_enter,
+       .port_change_mac        = lb_port_change_mac,
+};
+
+static struct team_mode lb_mode = {
+       .kind           = "loadbalance",
+       .owner          = THIS_MODULE,
+       .priv_size      = sizeof(struct lb_priv),
+       .ops            = &lb_mode_ops,
+};
+
+static int __init lb_init_module(void)
+{
+       return team_mode_register(&lb_mode);
+}
+
+static void __exit lb_cleanup_module(void)
+{
+       team_mode_unregister(&lb_mode);
+}
+
+module_init(lb_init_module);
+module_exit(lb_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Load-balancing mode for team");
+MODULE_ALIAS("team-mode-loadbalance");
index b7b3f5b0d40654c3c50b18ae56c969c03382c71c..db9953630da5bc4224755b91f4c232a8b8dd850b 100644 (file)
@@ -884,6 +884,7 @@ static const struct ethtool_ops usbnet_ethtool_ops = {
        .get_drvinfo            = usbnet_get_drvinfo,
        .get_msglevel           = usbnet_get_msglevel,
        .set_msglevel           = usbnet_set_msglevel,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 /*-------------------------------------------------------------------------*/
index 6675c92b542b4fca19c4af6d1ebd96e6586f046b..acc9aa832f764df7512165f4b1c4b4e9a3050416 100644 (file)
@@ -55,8 +55,9 @@ void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
                ath6kl_warn("failed to allocate testmode rx skb!\n");
                return;
        }
-       NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD);
-       NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf);
+       if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
+           nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
+               goto nla_put_failure;
        cfg80211_testmode_event(skb, GFP_KERNEL);
        return;
 
index f0551f807f6987717f5022152c481e95c6db9607..3c06c6b093e95865f0695ebfc1228cf3dc048de4 100644 (file)
@@ -343,38 +343,50 @@ static struct iw_handler_def ipw2100_wx_handler_def;
 
 static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
 {
-       *val = readl((void __iomem *)(dev->base_addr + reg));
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       *val = ioread32(priv->ioaddr + reg);
        IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
 }
 
 static inline void write_register(struct net_device *dev, u32 reg, u32 val)
 {
-       writel(val, (void __iomem *)(dev->base_addr + reg));
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       iowrite32(val, priv->ioaddr + reg);
        IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
 }
 
 static inline void read_register_word(struct net_device *dev, u32 reg,
                                      u16 * val)
 {
-       *val = readw((void __iomem *)(dev->base_addr + reg));
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       *val = ioread16(priv->ioaddr + reg);
        IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
 }
 
 static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
 {
-       *val = readb((void __iomem *)(dev->base_addr + reg));
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       *val = ioread8(priv->ioaddr + reg);
        IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
 }
 
 static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
 {
-       writew(val, (void __iomem *)(dev->base_addr + reg));
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       iowrite16(val, priv->ioaddr + reg);
        IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
 }
 
 static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
 {
-       writeb(val, (void __iomem *)(dev->base_addr + reg));
+       struct ipw2100_priv *priv = libipw_priv(dev);
+
+       iowrite8(val, priv->ioaddr + reg);
        IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
 }
 
@@ -506,13 +518,13 @@ static void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
                read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
 }
 
-static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev)
+static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
 {
-       return (dev->base_addr &&
-               (readl
-                ((void __iomem *)(dev->base_addr +
-                                  IPW_REG_DOA_DEBUG_AREA_START))
-                == IPW_DATA_DOA_DEBUG_VALUE));
+       u32 dbg;
+
+       read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
+
+       return dbg == IPW_DATA_DOA_DEBUG_VALUE;
 }
 
 static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
@@ -6082,9 +6094,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
 /* Look into using netdev destructor to shutdown libipw? */
 
 static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
-                                              void __iomem * base_addr,
-                                              unsigned long mem_start,
-                                              unsigned long mem_len)
+                                              void __iomem * ioaddr)
 {
        struct ipw2100_priv *priv;
        struct net_device *dev;
@@ -6096,6 +6106,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
        priv->ieee = netdev_priv(dev);
        priv->pci_dev = pci_dev;
        priv->net_dev = dev;
+       priv->ioaddr = ioaddr;
 
        priv->ieee->hard_start_xmit = ipw2100_tx;
        priv->ieee->set_security = shim__set_security;
@@ -6111,10 +6122,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
        dev->watchdog_timeo = 3 * HZ;
        dev->irq = 0;
 
-       dev->base_addr = (unsigned long)base_addr;
-       dev->mem_start = mem_start;
-       dev->mem_end = dev->mem_start + mem_len - 1;
-
        /* NOTE: We don't use the wireless_handlers hook
         * in dev as the system will start throwing WX requests
         * to us before we're actually initialized and it just
@@ -6215,8 +6222,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
 static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
                                const struct pci_device_id *ent)
 {
-       unsigned long mem_start, mem_len, mem_flags;
-       void __iomem *base_addr = NULL;
+       void __iomem *ioaddr;
        struct net_device *dev = NULL;
        struct ipw2100_priv *priv = NULL;
        int err = 0;
@@ -6225,18 +6231,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 
        IPW_DEBUG_INFO("enter\n");
 
-       mem_start = pci_resource_start(pci_dev, 0);
-       mem_len = pci_resource_len(pci_dev, 0);
-       mem_flags = pci_resource_flags(pci_dev, 0);
-
-       if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
+       if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
                IPW_DEBUG_INFO("weird - resource type is not memory\n");
                err = -ENODEV;
-               goto fail;
+               goto out;
        }
 
-       base_addr = ioremap_nocache(mem_start, mem_len);
-       if (!base_addr) {
+       ioaddr = pci_iomap(pci_dev, 0, 0);
+       if (!ioaddr) {
                printk(KERN_WARNING DRV_NAME
                       "Error calling ioremap_nocache.\n");
                err = -EIO;
@@ -6244,7 +6246,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        }
 
        /* allocate and initialize our net_device */
-       dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len);
+       dev = ipw2100_alloc_device(pci_dev, ioaddr);
        if (!dev) {
                printk(KERN_WARNING DRV_NAME
                       "Error calling ipw2100_alloc_device.\n");
@@ -6379,8 +6381,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        priv->status |= STATUS_INITIALIZED;
 
        mutex_unlock(&priv->action_mutex);
-
-       return 0;
+out:
+       return err;
 
       fail_unlock:
        mutex_unlock(&priv->action_mutex);
@@ -6409,63 +6411,56 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
                pci_set_drvdata(pci_dev, NULL);
        }
 
-       if (base_addr)
-               iounmap(base_addr);
+       pci_iounmap(pci_dev, ioaddr);
 
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
-
-       return err;
+       goto out;
 }
 
 static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
 {
        struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
-       struct net_device *dev;
+       struct net_device *dev = priv->net_dev;
 
-       if (priv) {
-               mutex_lock(&priv->action_mutex);
+       mutex_lock(&priv->action_mutex);
 
-               priv->status &= ~STATUS_INITIALIZED;
+       priv->status &= ~STATUS_INITIALIZED;
 
-               dev = priv->net_dev;
-               sysfs_remove_group(&pci_dev->dev.kobj,
-                                  &ipw2100_attribute_group);
+       sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
 
 #ifdef CONFIG_PM
-               if (ipw2100_firmware.version)
-                       ipw2100_release_firmware(priv, &ipw2100_firmware);
+       if (ipw2100_firmware.version)
+               ipw2100_release_firmware(priv, &ipw2100_firmware);
 #endif
-               /* Take down the hardware */
-               ipw2100_down(priv);
+       /* Take down the hardware */
+       ipw2100_down(priv);
 
-               /* Release the mutex so that the network subsystem can
-                * complete any needed calls into the driver... */
-               mutex_unlock(&priv->action_mutex);
+       /* Release the mutex so that the network subsystem can
+        * complete any needed calls into the driver... */
+       mutex_unlock(&priv->action_mutex);
 
-               /* Unregister the device first - this results in close()
-                * being called if the device is open.  If we free storage
-                * first, then close() will crash. */
-               unregister_netdev(dev);
+       /* Unregister the device first - this results in close()
+        * being called if the device is open.  If we free storage
+        * first, then close() will crash.
+        * FIXME: remove the comment above. */
+       unregister_netdev(dev);
 
-               ipw2100_kill_works(priv);
+       ipw2100_kill_works(priv);
 
-               ipw2100_queues_free(priv);
+       ipw2100_queues_free(priv);
 
-               /* Free potential debugging firmware snapshot */
-               ipw2100_snapshot_free(priv);
+       /* Free potential debugging firmware snapshot */
+       ipw2100_snapshot_free(priv);
 
-               if (dev->irq)
-                       free_irq(dev->irq, priv);
+       free_irq(dev->irq, priv);
 
-               if (dev->base_addr)
-                       iounmap((void __iomem *)dev->base_addr);
+       pci_iounmap(pci_dev, priv->ioaddr);
 
-               /* wiphy_unregister needs to be here, before free_libipw */
-               wiphy_unregister(priv->ieee->wdev.wiphy);
-               kfree(priv->ieee->bg_band.channels);
-               free_libipw(dev, 0);
-       }
+       /* wiphy_unregister needs to be here, before free_libipw */
+       wiphy_unregister(priv->ieee->wdev.wiphy);
+       kfree(priv->ieee->bg_band.channels);
+       free_libipw(dev, 0);
 
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
@@ -8609,7 +8604,7 @@ static int ipw2100_ucode_download(struct ipw2100_priv *priv,
        struct net_device *dev = priv->net_dev;
        const unsigned char *microcode_data = fw->uc.data;
        unsigned int microcode_data_left = fw->uc.size;
-       void __iomem *reg = (void __iomem *)dev->base_addr;
+       void __iomem *reg = priv->ioaddr;
 
        struct symbol_alive_response response;
        int i, j;
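The ipw2100 rework above stops stuffing the mapped BAR into dev->base_addr and instead keeps a void __iomem *ioaddr in the driver private data, mapped with pci_iomap() and accessed through ioread*()/iowrite*(). A minimal kernel-style sketch of that pattern; the "foo" names are hypothetical:

/* Kernel-style sketch (assumed names) of the pci_iomap()/ioread32()
 * accessor pattern used in the ipw2100 conversion above. */
#include <linux/pci.h>
#include <linux/io.h>

struct foo_priv {
	void __iomem *ioaddr;	/* BAR 0, mapped at probe time */
};

static u32 foo_read_reg(struct foo_priv *priv, u32 reg)
{
	return ioread32(priv->ioaddr + reg);
}

static int foo_map_bar0(struct pci_dev *pdev, struct foo_priv *priv)
{
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return -ENODEV;
	priv->ioaddr = pci_iomap(pdev, 0, 0);	/* len 0 maps the whole BAR */
	if (!priv->ioaddr)
		return -EIO;
	return 0;
}

/* The teardown path undoes the mapping with pci_iounmap(pdev, priv->ioaddr). */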
index 99cba968aa58d94469f0d34ffa95de65c643c07c..e5b1c77ae0ebbc7e910875fc790a09152b54a06b 100644 (file)
@@ -488,6 +488,7 @@ enum {
 #define CAP_PRIVACY_ON          (1<<1) /* Off = No privacy */
 
 struct ipw2100_priv {
+       void __iomem *ioaddr;
 
        int stop_hang_check;    /* Set 1 when shutting down to kill hang_check */
        int stop_rf_kill;       /* Set 1 when shutting down to kill rf_kill */
index 2b022571a8595a5dd41c89bdd515d0f1fd0dc70b..57af0fc76d123591b7ca65ae5d3fa0ce0807c531 100644 (file)
@@ -11826,10 +11826,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
        net_dev->wireless_data = &priv->wireless_data;
        net_dev->wireless_handlers = &ipw_wx_handler_def;
        net_dev->ethtool_ops = &ipw_ethtool_ops;
-       net_dev->irq = pdev->irq;
-       net_dev->base_addr = (unsigned long)priv->hw_base;
-       net_dev->mem_start = pci_resource_start(pdev, 0);
-       net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
 
        err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
        if (err) {
index 76f7f925143614c351c45bfc0e590809dcc4e638..a54e20e7b17f6f7dc7da6631ca610d6e95c12b70 100644 (file)
@@ -184,9 +184,10 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
                         "Run out of memory for messages to user space ?\n");
                return;
        }
-       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-       /* the length doesn't include len_n_flags field, so add it manually */
-       NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data);
+       if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+           /* the length doesn't include len_n_flags field, so add it manually */
+           nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
+               goto nla_put_failure;
        cfg80211_testmode_event(skb, GFP_ATOMIC);
        return;
 
@@ -314,8 +315,9 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
        memcpy(reply_buf, &(pkt->hdr), reply_len);
        iwl_free_resp(&cmd);
 
-       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-       NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf);
+       if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+           nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
+               goto nla_put_failure;
        return cfg80211_testmode_reply(skb);
 
 nla_put_failure:
@@ -379,7 +381,8 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
                        IWL_ERR(priv, "Memory allocation fail\n");
                        return -ENOMEM;
                }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+               if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
+                       goto nla_put_failure;
                status = cfg80211_testmode_reply(skb);
                if (status < 0)
                        IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -478,10 +481,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                        IWL_ERR(priv, "Memory allocation fail\n");
                        return -ENOMEM;
                }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-                           IWL_TM_CMD_DEV2APP_SYNC_RSP);
-               NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
-                       rsp_data_len, rsp_data_ptr);
+               if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+                               IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
+                   nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
+                           rsp_data_len, rsp_data_ptr))
+                       goto nla_put_failure;
                status = cfg80211_testmode_reply(skb);
                if (status < 0)
                        IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -536,11 +540,12 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                                IWL_ERR(priv, "Memory allocation fail\n");
                                return -ENOMEM;
                        }
-                       NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-                               IWL_TM_CMD_DEV2APP_EEPROM_RSP);
-                       NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
-                               cfg(priv)->base_params->eeprom_size,
-                               priv->shrd->eeprom);
+                       if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+                                       IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
+                           nla_put(skb, IWL_TM_ATTR_EEPROM,
+                                   cfg(priv)->base_params->eeprom_size,
+                                   priv->shrd->eeprom))
+                               goto nla_put_failure;
                        status = cfg80211_testmode_reply(skb);
                        if (status < 0)
                                IWL_ERR(priv, "Error sending msg : %d\n",
@@ -566,8 +571,9 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                        IWL_ERR(priv, "Memory allocation fail\n");
                        return -ENOMEM;
                }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION,
-                           priv->fw->ucode_ver);
+               if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
+                               priv->fw->ucode_ver))
+                       goto nla_put_failure;
                status = cfg80211_testmode_reply(skb);
                if (status < 0)
                        IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -582,7 +588,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                        IWL_ERR(priv, "Memory allocation fail\n");
                        return -ENOMEM;
                }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+               if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
+                       goto nla_put_failure;
                status = cfg80211_testmode_reply(skb);
                if (status < 0)
                        IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -602,9 +609,10 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                        inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
                        data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
                }
-               NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type);
-               NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size);
-               NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size);
+               if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type) ||
+                   nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
+                   nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
+                       goto nla_put_failure;
                status = cfg80211_testmode_reply(skb);
                if (status < 0)
                        IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -678,9 +686,10 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
                        iwl_trace_cleanup(priv);
                        return -ENOMEM;
                }
-               NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
-                       sizeof(priv->testmode_trace.dma_addr),
-                       (u64 *)&priv->testmode_trace.dma_addr);
+               if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
+                           sizeof(priv->testmode_trace.dma_addr),
+                           (u64 *)&priv->testmode_trace.dma_addr))
+                       goto nla_put_failure;
                status = cfg80211_testmode_reply(skb);
                if (status < 0) {
                        IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -725,9 +734,10 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
                        length = priv->testmode_trace.buff_size %
                                DUMP_CHUNK_SIZE;
 
-               NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
-                       priv->testmode_trace.trace_addr +
-                       (DUMP_CHUNK_SIZE * idx));
+               if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+                           priv->testmode_trace.trace_addr +
+                           (DUMP_CHUNK_SIZE * idx)))
+                       goto nla_put_failure;
                idx++;
                cb->args[4] = idx;
                return 0;
@@ -922,9 +932,10 @@ static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
                        length = priv->testmode_mem.buff_size %
                                DUMP_CHUNK_SIZE;
 
-               NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
-                       priv->testmode_mem.buff_addr +
-                       (DUMP_CHUNK_SIZE * idx));
+               if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
+                           priv->testmode_mem.buff_addr +
+                           (DUMP_CHUNK_SIZE * idx)))
+                       goto nla_put_failure;
                idx++;
                cb->args[4] = idx;
                return 0;
index b7ce6a6e355f68c30d2c55cb772dfd8d5414af57..538783f51989e46454b590c007ccf2a657a86566 100644 (file)
@@ -582,11 +582,13 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
        }
 
-       NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
-                    sizeof(struct mac_address), data->addresses[1].addr);
+       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+                   sizeof(struct mac_address), data->addresses[1].addr))
+               goto nla_put_failure;
 
        /* We get the skb->data */
-       NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data);
+       if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
+               goto nla_put_failure;
 
        /* We get the flags for this transmission, and we translate them to
           wmediumd flags  */
@@ -597,7 +599,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
 
-       NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags);
+       if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
+               goto nla_put_failure;
 
        /* We get the tx control (rate and retries) info*/
 
@@ -606,12 +609,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                tx_attempts[i].count = info->status.rates[i].count;
        }
 
-       NLA_PUT(skb, HWSIM_ATTR_TX_INFO,
-                    sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
-                    tx_attempts);
+       if (nla_put(skb, HWSIM_ATTR_TX_INFO,
+                   sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
+                   tx_attempts))
+               goto nla_put_failure;
 
        /* We create a cookie to identify this skb */
-       NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb);
+       if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
+               goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
        genlmsg_unicast(&init_net, skb, dst_pid);
@@ -1108,7 +1113,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
                                                nla_total_size(sizeof(u32)));
                if (!skb)
                        return -ENOMEM;
-               NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps);
+               if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
+                       goto nla_put_failure;
                return cfg80211_testmode_reply(skb);
        default:
                return -EOPNOTSUPP;
index 1e93bb9c0246cfa02e44f6160851909b422b14ec..b41428f5b3b254ff32125bac149130f252731a69 100644 (file)
@@ -116,7 +116,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
                        goto out_sleep;
                }
 
-               NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
+               if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
+                       goto nla_put_failure;
                ret = cfg80211_testmode_reply(skb);
                if (ret < 0)
                        goto out_sleep;
@@ -178,7 +179,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
                goto out_free;
        }
 
-       NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+       if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
+               goto nla_put_failure;
        ret = cfg80211_testmode_reply(skb);
        if (ret < 0)
                goto out_free;
@@ -297,7 +299,8 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
                goto out;
        }
 
-       NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr);
+       if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
+               goto nla_put_failure;
        ret = cfg80211_testmode_reply(skb);
        if (ret < 0)
                goto out;
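All of the testmode conversions above follow the same shape: the old NLA_PUT_*() macros, which hid a goto inside, are replaced by explicit checks of the nla_put_*() return value (non-zero when the skb has run out of tailroom). A compact kernel-style sketch of the idiom, with hypothetical attribute names:

/* Kernel-style sketch of the nla_put_*() error-checking idiom used in
 * the conversions above; FOO_ATTR_* and foo_fill_info() are hypothetical. */
static int foo_fill_info(struct sk_buff *skb, u32 ifindex,
			 const void *blob, int blob_len)
{
	if (nla_put_u32(skb, FOO_ATTR_IFINDEX, ifindex) ||
	    nla_put(skb, FOO_ATTR_BLOB, blob_len, blob))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;	/* message buffer exhausted */
}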
index f519a131238d7df6a38cd7326c36f2f2cc4369d7..1e528b539a07f202a04b9e9dbdfbffbac6656703 100644 (file)
@@ -304,6 +304,12 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
 }
 EXPORT_SYMBOL(ptp_clock_event);
 
+int ptp_clock_index(struct ptp_clock *ptp)
+{
+       return ptp->index;
+}
+EXPORT_SYMBOL(ptp_clock_index);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
index 6f2782bb5f41e036ac9b66c06eb6e9df9169db55..e03c40692b0073106f845f9adbb1d38250590e76 100644 (file)
@@ -284,6 +284,7 @@ static void __exit ptp_ixp_exit(void)
 {
        free_irq(MASTER_IRQ, &ixp_clock);
        free_irq(SLAVE_IRQ, &ixp_clock);
+       ixp46x_phc_index = -1;
        ptp_clock_unregister(ixp_clock.ptp_clock);
 }
 
@@ -302,6 +303,8 @@ static int __init ptp_ixp_init(void)
        if (IS_ERR(ixp_clock.ptp_clock))
                return PTR_ERR(ixp_clock.ptp_clock);
 
+       ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock);
+
        __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
        __raw_writel(1, &ixp_clock.regs->trgt_lo);
        __raw_writel(0, &ixp_clock.regs->trgt_hi);
index 65a2562f66b4c8340e48fff4dfdb725cb0698ef7..6bb43382f3f3d92d376ba83bbf24024c38d00407 100644 (file)
@@ -67,6 +67,17 @@ struct ieee_ets {
        __u8    reco_prio_tc[IEEE_8021QAZ_MAX_TCS];
 };
 
+/* This structure contains the rate limit extension to the IEEE 802.1Qaz ETS
+ * managed object.
+ * Values are 64 bits long and specified in Kbps to enable usage over both
+ * slow and very fast networks.
+ *
+ * @tc_maxrate: maximal tc tx bandwidth indexed by traffic class
+ */
+struct ieee_maxrate {
+       __u64   tc_maxrate[IEEE_8021QAZ_MAX_TCS];
+};
+
 /* This structure contains the IEEE 802.1Qaz PFC managed object
  *
  * @pfc_cap: Indicates the number of traffic classes on the local device
@@ -321,6 +332,7 @@ enum ieee_attrs {
        DCB_ATTR_IEEE_PEER_ETS,
        DCB_ATTR_IEEE_PEER_PFC,
        DCB_ATTR_IEEE_PEER_APP,
+       DCB_ATTR_IEEE_MAXRATE,
        __DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
index f5647b59a90e6ade8f551096c6e7bc19193156ae..89d68d837b6e7dc167e727f3d137882efb593cac 100644 (file)
@@ -726,6 +726,29 @@ struct ethtool_sfeatures {
        struct ethtool_set_features_block features[0];
 };
 
+/**
+ * struct ethtool_ts_info - holds a device's timestamping and PHC association
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ *
+ * The bits in the 'tx_types' and 'rx_filters' fields correspond to
+ * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
+ * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
+ * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ */
+struct ethtool_ts_info {
+       __u32   cmd;
+       __u32   so_timestamping;
+       __s32   phc_index;
+       __u32   tx_types;
+       __u32   tx_reserved[3];
+       __u32   rx_filters;
+       __u32   rx_reserved[3];
+};
+
 /*
  * %ETHTOOL_SFEATURES changes features present in features[].valid to the
  * values of corresponding bits in features[].requested. Bits in .requested
@@ -788,6 +811,7 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
 /**
  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
@@ -893,6 +917,9 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
  *                and flag of the device.
  * @get_dump_data: Get dump data.
  * @set_dump: Set dump specific flags to the device.
+ * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ *     Drivers supporting transmit time stamps in software should set this to
+ *     ethtool_op_get_ts_info().
  *
  * All operations are optional (i.e. the function pointer may be set
  * to %NULL) and callers must take this into account.  Callers must
@@ -954,6 +981,7 @@ struct ethtool_ops {
        int     (*get_dump_data)(struct net_device *,
                                 struct ethtool_dump *, void *);
        int     (*set_dump)(struct net_device *, struct ethtool_dump *);
+       int     (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
 
 };
 #endif /* __KERNEL__ */
@@ -1028,6 +1056,7 @@ struct ethtool_ops {
 #define ETHTOOL_SET_DUMP       0x0000003e /* Set dump settings */
 #define ETHTOOL_GET_DUMP_FLAG  0x0000003f /* Get dump settings */
 #define ETHTOOL_GET_DUMP_DATA  0x00000040 /* Get dump data */
+#define ETHTOOL_GET_TS_INFO    0x00000041 /* Get time stamping and PHC info */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET         ETHTOOL_GSET
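The new ETHTOOL_GET_TS_INFO command and the get_ts_info ethtool hook let a driver report its timestamping capabilities together with the index of its PTP hardware clock, obtained with ptp_clock_index(), which is also added in this merge. A hedged kernel-style sketch of such a callback; foo_priv and its ptp_clock field are assumptions, while the SOF_TIMESTAMPING_*/HWTSTAMP_* constants come from linux/net_tstamp.h:

/* Kernel-style sketch of a get_ts_info() callback for a driver with
 * hardware timestamping and a registered PHC. foo_priv is hypothetical. */
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/ptp_clock_kernel.h>

struct foo_priv {
	struct ptp_clock *ptp_clock;	/* registered PHC, or NULL */
};

static int foo_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct foo_priv *priv = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	/* -1 tells userspace there is no PHC associated with the device */
	info->phc_index = priv->ptp_clock ?
			  ptp_clock_index(priv->ptp_clock) : -1;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}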
index 8eeb205f298b078ccbaa714b2d171a50da40096d..72090994d789c235c7f73545cec3bc136ff57600 100644 (file)
@@ -126,7 +126,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
 #define SKF_AD_HATYPE  28
 #define SKF_AD_RXHASH  32
 #define SKF_AD_CPU     36
-#define SKF_AD_MAX     40
+#define SKF_AD_ALU_XOR_X       40
+#define SKF_AD_MAX     44
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
 
@@ -153,6 +154,9 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp)
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
 extern unsigned int sk_run_filter(const struct sk_buff *skb,
                                  const struct sock_filter *filter);
+extern int sk_unattached_filter_create(struct sk_filter **pfp,
+                                      struct sock_fprog *fprog);
+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
@@ -228,6 +232,7 @@ enum {
        BPF_S_ANC_HATYPE,
        BPF_S_ANC_RXHASH,
        BPF_S_ANC_CPU,
+       BPF_S_ANC_ALU_XOR_X,
 };
 
 #endif /* __KERNEL__ */
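A minimal in-kernel sketch of the new unattached-filter API, using the long-standing classic BPF macros from linux/filter.h; the "accept everything" program and the example_* names are invented for illustration.

#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/skbuff.h>

static struct sk_filter *example_fp;

static int example_filter_init(void)
{
	struct sock_filter insns[] = {
		/* return the whole packet: classic BPF "accept" */
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog prog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};

	/* the program is validated and copied, so stack storage is fine */
	return sk_unattached_filter_create(&example_fp, &prog);
}

static unsigned int example_filter_run(const struct sk_buff *skb)
{
	/* non-zero return: number of bytes to keep; 0 means drop */
	return SK_RUN_FILTER(example_fp, skb);
}

static void example_filter_exit(void)
{
	sk_unattached_filter_destroy(example_fp);
}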
index 5852545e6bba77423c16e4577efc40c4450131a9..6af8738ae7e976855271e4226fac24896c901a71 100644 (file)
@@ -274,6 +274,33 @@ struct hv_ring_buffer_debug_info {
        u32 bytes_avail_towrite;
 };
 
+
+/*
+ *
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get the number of bytes available to read from and to write to
+ * the specified ring buffer
+ */
+static inline void
+hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+                         u32 *read, u32 *write)
+{
+       u32 read_loc, write_loc, dsize;
+
+       smp_read_barrier_depends();
+
+       /* Capture the read/write indices before they changed */
+       read_loc = rbi->ring_buffer->read_index;
+       write_loc = rbi->ring_buffer->write_index;
+       dsize = rbi->ring_datasize;
+
+       *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+               read_loc - write_loc;
+       *read = dsize - *write;
+}
+
+
 /*
  * We use the same version numbering for all Hyper-V modules.
  *
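A small usage sketch for the helper above (not from the commit); the caller name and the threshold check are illustrative.

/* Hedged example: decide whether a write of 'needed' bytes would fit. */
static bool example_ring_has_room(struct hv_ring_buffer_info *rbi, u32 needed)
{
	u32 avail_toread, avail_towrite;

	hv_get_ringbuffer_availbytes(rbi, &avail_toread, &avail_towrite);
	return avail_towrite >= needed;
}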
index 4b24ff453aee56029a2b2e0b5ee233b9afab6977..2f4fa93454c7ec4ba1494a74838605a737e9fc13 100644 (file)
@@ -138,6 +138,8 @@ enum {
        IFLA_GROUP,             /* Group the device belongs to */
        IFLA_NET_NS_FD,
        IFLA_EXT_MASK,          /* Extended info mask, VFs, etc */
+       IFLA_PROMISCUITY,       /* Promiscuity count: > 0 means acts PROMISC */
+#define IFLA_PROMISCUITY IFLA_PROMISCUITY
        __IFLA_MAX
 };
 
index 58404b0c50101e93b2978d6d0807447f08e4a049..5fd5ab171165617c5169f1fdc4dc88cfb5749c2a 100644 (file)
@@ -33,6 +33,24 @@ struct team_port {
        struct team *team;
        int index;
 
+       bool linkup; /* either state.linkup or user.linkup */
+
+       struct {
+               bool linkup;
+               u32 speed;
+               u8 duplex;
+       } state;
+
+       /* Values set by userspace */
+       struct {
+               bool linkup;
+               bool linkup_enabled;
+       } user;
+
+       /* Custom gennetlink interface related flags */
+       bool changed;
+       bool removed;
+
        /*
         * A place for storing original values of the device before it
         * become a port.
@@ -42,14 +60,6 @@ struct team_port {
                unsigned int mtu;
        } orig;
 
-       bool linkup;
-       u32 speed;
-       u8 duplex;
-
-       /* Custom gennetlink interface related flags */
-       bool changed;
-       bool removed;
-
        struct rcu_head rcu;
 };
 
@@ -68,18 +78,30 @@ struct team_mode_ops {
 enum team_option_type {
        TEAM_OPTION_TYPE_U32,
        TEAM_OPTION_TYPE_STRING,
+       TEAM_OPTION_TYPE_BINARY,
+       TEAM_OPTION_TYPE_BOOL,
+};
+
+struct team_gsetter_ctx {
+       union {
+               u32 u32_val;
+               const char *str_val;
+               struct {
+                       const void *ptr;
+                       u32 len;
+               } bin_val;
+               bool bool_val;
+       } data;
+       struct team_port *port;
 };
 
 struct team_option {
        struct list_head list;
        const char *name;
+       bool per_port;
        enum team_option_type type;
-       int (*getter)(struct team *team, void *arg);
-       int (*setter)(struct team *team, void *arg);
-
-       /* Custom gennetlink interface related flags */
-       bool changed;
-       bool removed;
+       int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+       int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
 };
 
 struct team_mode {
@@ -110,6 +132,7 @@ struct team {
        struct list_head port_list;
 
        struct list_head option_list;
+       struct list_head option_inst_list; /* list of option instances */
 
        const struct team_mode *mode;
        struct team_mode_ops ops;
@@ -216,6 +239,7 @@ enum {
        TEAM_ATTR_OPTION_TYPE,          /* u8 */
        TEAM_ATTR_OPTION_DATA,          /* dynamic */
        TEAM_ATTR_OPTION_REMOVED,       /* flag */
+       TEAM_ATTR_OPTION_PORT_IFINDEX,  /* u32 */ /* for per-port options */
 
        __TEAM_ATTR_OPTION_MAX,
        TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
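A hedged sketch of how a per-port option could be declared against the reworked getter/setter signatures; the option name, the fields touched and the registration note are illustrative rather than taken from this commit.

#include <linux/if_team.h>

static int example_user_linkup_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	/* for per_port options the core fills ctx->port before calling */
	ctx->data.bool_val = ctx->port->user.linkup;
	return 0;
}

static int example_user_linkup_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	ctx->port->user.linkup = ctx->data.bool_val;
	return 0;
}

static const struct team_option example_options[] = {
	{
		.name		= "user_linkup",
		.type		= TEAM_OPTION_TYPE_BOOL,
		.per_port	= true,
		.getter		= example_user_linkup_get,
		.setter		= example_user_linkup_set,
	},
};

/* registered from a mode's init callback, e.g. with the existing
 * team_options_register(team, example_options, ARRAY_SIZE(example_options)) */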
index 9958ff2cad3c624f7f6c84457dffa71f217d5a44..1f3860a8a1099ea1288cbd618cf160290874068e 100644 (file)
@@ -150,6 +150,10 @@ enum {
        /* statistics commands */
        MLX4_CMD_QUERY_IF_STAT   = 0X54,
        MLX4_CMD_SET_IF_STAT     = 0X55,
+
+       /* set port opcode modifiers */
+       MLX4_SET_PORT_PRIO2TC = 0x8,
+       MLX4_SET_PORT_SCHEDULER  = 0x9,
 };
 
 enum {
index 834c96c5d879d27ca673c144505a1c6a8d505b73..6d028247f79dedef0067482c04cf17b408480fd4 100644 (file)
@@ -628,6 +628,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
                          u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
                           u8 promisc);
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+               u8 *pg, u16 *ratelimit);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
index 091f9e7dc8b9629387a3d8e0d4bee9e3f915b702..96005d75893c7b91abcfb6e38ddd3c65e74463ff 100644 (file)
@@ -139,7 +139,8 @@ struct mlx4_qp_path {
        u8                      rgid[16];
        u8                      sched_queue;
        u8                      vlan_index;
-       u8                      reserved3[2];
+       u8                      feup;
+       u8                      reserved3;
        u8                      reserved4[2];
        u8                      dmac[6];
 };
index 2f8e18a232273faa679a1d7ee60944b292f16de6..d6d549cf1f23f9a4ba89aea56394565434ee7c84 100644 (file)
@@ -411,26 +411,32 @@ ip_set_get_h16(const struct nlattr *attr)
 #define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
 #define ipset_nest_end(skb, start)  nla_nest_end(skb, start)
 
-#define NLA_PUT_IPADDR4(skb, type, ipaddr)                     \
-do {                                                           \
-       struct nlattr *__nested = ipset_nest_start(skb, type);  \
-                                                               \
-       if (!__nested)                                          \
-               goto nla_put_failure;                           \
-       NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);     \
-       ipset_nest_end(skb, __nested);                          \
-} while (0)
-
-#define NLA_PUT_IPADDR6(skb, type, ipaddrptr)                  \
-do {                                                           \
-       struct nlattr *__nested = ipset_nest_start(skb, type);  \
-                                                               \
-       if (!__nested)                                          \
-               goto nla_put_failure;                           \
-       NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6,                    \
-               sizeof(struct in6_addr), ipaddrptr);            \
-       ipset_nest_end(skb, __nested);                          \
-} while (0)
+static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
+{
+       struct nlattr *__nested = ipset_nest_start(skb, type);
+       int ret;
+
+       if (!__nested)
+               return -EMSGSIZE;
+       ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+       if (!ret)
+               ipset_nest_end(skb, __nested);
+       return ret;
+}
+
+static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr)
+{
+       struct nlattr *__nested = ipset_nest_start(skb, type);
+       int ret;
+
+       if (!__nested)
+               return -EMSGSIZE;
+       ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
+                     sizeof(struct in6_addr), ipaddrptr);
+       if (!ret)
+               ipset_nest_end(skb, __nested);
+       return ret;
+}
 
 /* Get address from skbuff */
 static inline __be32
index 05a5d72680bed904c23687a69e43a7759bc61848..289b62d9dd1fd272ad03737a1f910f1a916a050e 100644 (file)
@@ -594,17 +594,20 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
-       NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
-                     htonl(jhash_size(h->table->htable_bits)));
-       NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+       if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
+                         htonl(jhash_size(h->table->htable_bits))) ||
+           nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
+               goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
-       if (h->netmask != HOST_MASK)
-               NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+       if (h->netmask != HOST_MASK &&
+           nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
+               goto nla_put_failure;
 #endif
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
-       if (with_timeout(h->timeout))
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+       if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
+           (with_timeout(h->timeout) &&
+            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))))
+               goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
        return 0;
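The conversions in this merge all follow one pattern: the exception-style NLA_PUT_*() macros give way to explicit checks of the nla_put_*() return values. A minimal sketch with made-up attribute numbers:

#include <linux/errno.h>
#include <net/netlink.h>

static int example_fill_info(struct sk_buff *skb, u32 id, __be32 addr)
{
	/* 1 and 2 are placeholder attribute types for this sketch */
	if (nla_put_u32(skb, 1, id) ||
	    nla_put_net32(skb, 2, addr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}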
index 6fe0a37d4abf3ca87b9e572bf21d21e21f07b34c..f092032f1c98ca7622a2da60415e0090fd3d2964 100644 (file)
@@ -412,6 +412,9 @@ struct phy_driver {
        /* Clears up any memory if needed */
        void (*remove)(struct phy_device *phydev);
 
+       /* Handles ethtool queries for hardware time stamping. */
+       int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
+
        /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
        int  (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
 
diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h
new file mode 100644 (file)
index 0000000..b5d8c19
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Ethernet driver for the WIZnet W5x00 chip.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef PLATFORM_DATA_WIZNET_H
+#define PLATFORM_DATA_WIZNET_H
+
+#include <linux/if_ether.h>
+
+struct wiznet_platform_data {
+       int     link_gpio;
+       u8      mac_addr[ETH_ALEN];
+};
+
+#ifndef CONFIG_WIZNET_BUS_SHIFT
+#define CONFIG_WIZNET_BUS_SHIFT 0
+#endif
+
+#define W5100_BUS_DIRECT_SIZE  (0x8000 << CONFIG_WIZNET_BUS_SHIFT)
+#define W5300_BUS_DIRECT_SIZE  (0x0400 << CONFIG_WIZNET_BUS_SHIFT)
+
+#endif /* PLATFORM_DATA_WIZNET_H */
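A speculative board-code sketch for the new platform data; the device name, MAC address and GPIO value are assumptions, and the chip's memory window and interrupt resources are omitted for brevity.

#include <linux/platform_device.h>
#include <linux/platform_data/wiznet.h>

static struct wiznet_platform_data example_w5100_pdata = {
	.link_gpio	= -1,	/* assumed: an invalid GPIO means "not wired" */
	.mac_addr	= { 0x00, 0x08, 0xdc, 0x01, 0x02, 0x03 },
};

static struct platform_device example_w5100_device = {
	.name			= "w5100",	/* assumed driver name */
	.id			= -1,
	.dev.platform_data	= &example_w5100_pdata,
	/* struct resource entries for the bus window and IRQ would be
	 * added here on a real board */
};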
index dd2e44fba63e769ef6315d654e566f5b178193b1..945704c2ed65307bb8ec47c39cbd24d364b40fa3 100644 (file)
@@ -136,4 +136,12 @@ struct ptp_clock_event {
 extern void ptp_clock_event(struct ptp_clock *ptp,
                            struct ptp_clock_event *event);
 
+/**
+ * ptp_clock_index() - obtain the device index of a PTP clock
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ */
+
+extern int ptp_clock_index(struct ptp_clock *ptp);
+
 #endif
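Tying the new pieces together, a driver-side sketch (not from the commit) of a get_ts_info() implementation that reports its PTP clock through ptp_clock_index(); the private structure and the advertised capabilities are illustrative.

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

struct example_priv {
	struct ptp_clock *ptp;
};

static int example_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	struct example_priv *priv = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = priv->ptp ? ptp_clock_index(priv->ptp) : -1;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}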
index 0dddc9e42b6bc4d5b2d342ad0d09d3fc502c482b..cf64031863596d37440cfeba6995ec878895ef6d 100644 (file)
 
 #include <linux/platform_device.h>
 
+#define STMMAC_RX_COE_NONE     0
+#define STMMAC_RX_COE_TYPE1    1
+#define STMMAC_RX_COE_TYPE2    2
+
+/* Define the macros for CSR clock range parameters to be passed by
+ * platform code.
+ * This could also be configured at run time using CPU freq framework. */
+
+/* MDC Clock Selection define */
+#define        STMMAC_CSR_60_100M      0x0     /* MDC = clk_scr_i/42 */
+#define        STMMAC_CSR_100_150M     0x1     /* MDC = clk_scr_i/62 */
+#define        STMMAC_CSR_20_35M       0x2     /* MDC = clk_scr_i/16 */
+#define        STMMAC_CSR_35_60M       0x3     /* MDC = clk_scr_i/26 */
+#define        STMMAC_CSR_150_250M     0x4     /* MDC = clk_scr_i/102 */
+#define        STMMAC_CSR_250_300M     0x5     /* MDC = clk_scr_i/122 */
+
+/* The MDC clock could be set higher than the IEEE 802.3
+ * specified frequency limit of 2.5 MHz, by programming a clock divider
+ * of value different than the above defined values. The resultant MDIO
+ * clock frequency of 12.5 MHz is applicable for the interfacing chips
+ * supporting higher MDC clocks.
+ * The MDC clock selection macros need to be defined for MDC clock rate
+ * of 12.5 MHz, corresponding to the following selection.
+ */
+#define STMMAC_CSR_I_4         0x8     /* clk_csr_i/4 */
+#define STMMAC_CSR_I_6         0x9     /* clk_csr_i/6 */
+#define STMMAC_CSR_I_8         0xA     /* clk_csr_i/8 */
+#define STMMAC_CSR_I_10                0xB     /* clk_csr_i/10 */
+#define STMMAC_CSR_I_12                0xC     /* clk_csr_i/12 */
+#define STMMAC_CSR_I_14                0xD     /* clk_csr_i/14 */
+#define STMMAC_CSR_I_16                0xE     /* clk_csr_i/16 */
+#define STMMAC_CSR_I_18                0xF     /* clk_csr_i/18 */
+
+/* AXI DMA Burst length supported */
+#define DMA_AXI_BLEN_4         (1 << 1)
+#define DMA_AXI_BLEN_8         (1 << 2)
+#define DMA_AXI_BLEN_16                (1 << 3)
+#define DMA_AXI_BLEN_32                (1 << 4)
+#define DMA_AXI_BLEN_64                (1 << 5)
+#define DMA_AXI_BLEN_128       (1 << 6)
+#define DMA_AXI_BLEN_256       (1 << 7)
+#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \
+                       | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
+                       | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+
 /* Platform data for platform device structure's platform_data field */
 
 struct stmmac_mdio_bus_data {
@@ -38,16 +83,24 @@ struct stmmac_mdio_bus_data {
        int probed_phy_irq;
 };
 
+struct stmmac_dma_cfg {
+       int pbl;
+       int fixed_burst;
+       int burst_len;
+};
+
 struct plat_stmmacenet_data {
+       char *phy_bus_name;
        int bus_id;
        int phy_addr;
        int interface;
        struct stmmac_mdio_bus_data *mdio_bus_data;
-       int pbl;
+       struct stmmac_dma_cfg *dma_cfg;
        int clk_csr;
        int has_gmac;
        int enh_desc;
        int tx_coe;
+       int rx_coe;
        int bugged_jumbo;
        int pmt;
        int force_sf_dma_mode;
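A hedged sketch of platform data that uses the new dma_cfg block and the CSR/RX-COE constants above; every value is illustrative, not a recommendation for any particular board.

#include <linux/phy.h>
#include <linux/stmmac.h>

static struct stmmac_dma_cfg example_dma_cfg = {
	.pbl		= 32,
	.fixed_burst	= 1,
	.burst_len	= DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16,
};

static struct plat_stmmacenet_data example_stmmac_pdata = {
	.bus_id		= 0,
	.phy_addr	= 0,			/* example PHY address */
	.interface	= PHY_INTERFACE_MODE_RGMII,
	.clk_csr	= STMMAC_CSR_150_250M,
	.has_gmac	= 1,
	.rx_coe		= STMMAC_RX_COE_TYPE2,
	.dma_cfg	= &example_dma_cfg,
};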
index f55c980d8e23d14b42bfdfa583dccdb76aa4d303..fc5d5dcebb00e88447444cf95ff525e57363a940 100644 (file)
@@ -48,6 +48,8 @@ struct dcbnl_rtnl_ops {
        /* IEEE 802.1Qaz std */
        int (*ieee_getets) (struct net_device *, struct ieee_ets *);
        int (*ieee_setets) (struct net_device *, struct ieee_ets *);
+       int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
+       int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
        int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_getapp) (struct net_device *, struct dcb_app *);
index 75d615649071e39688b7f12a0e8a9c4066b8b31d..ce70a581d95c84a397dfd7e509a9428632a68cc5 100644 (file)
@@ -41,7 +41,6 @@ struct net;
 
 extern void    icmp_send(struct sk_buff *skb_in,  int type, int code, __be32 info);
 extern int     icmp_rcv(struct sk_buff *skb);
-extern int     icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int     icmp_init(void);
 extern void    icmp_out_count(struct net *net, unsigned char type);
 
index 6f9c25a76cd1328856293c1af4d9cf8b10188c1c..c02b6ad3f6c589359a9de57f48fa38d683010349 100644 (file)
@@ -34,6 +34,7 @@ enum {
        __ND_OPT_ARRAY_MAX,
        ND_OPT_ROUTE_INFO = 24,         /* RFC4191 */
        ND_OPT_RDNSS = 25,              /* RFC5006 */
+       ND_OPT_DNSSL = 31,              /* RFC6106 */
        __ND_OPT_MAX
 };
 
index f394fe5d764109e61f0493aa178cfd652b14dee1..785f37a3b44ee80e1336d7301bd54c6ff48b7ba7 100644 (file)
  *   nla_put_flag(skb, type)           add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
  *
- * Exceptions Based Attribute Construction:
- *   NLA_PUT(skb, type, len, data)     add attribute to skb
- *   NLA_PUT_U8(skb, type, value)      add u8 attribute to skb
- *   NLA_PUT_U16(skb, type, value)     add u16 attribute to skb
- *   NLA_PUT_U32(skb, type, value)     add u32 attribute to skb
- *   NLA_PUT_U64(skb, type, value)     add u64 attribute to skb
- *   NLA_PUT_STRING(skb, type, str)    add string attribute to skb
- *   NLA_PUT_FLAG(skb, type)           add flag attribute to skb
- *   NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb
- *
- *   The meaning of these functions is equal to their lower case
- *   variants but they jump to the label nla_put_failure in case
- *   of a failure.
- *
  * Nested Attributes Construction:
  *   nla_nest_start(skb, type)         start a nested attribute
  *   nla_nest_end(skb, nla)            finalize a nested attribute
@@ -771,6 +757,39 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
        return nla_put(skb, attrtype, sizeof(u16), &value);
 }
 
+/**
+ * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+       return nla_put(skb, attrtype, sizeof(__be16), &value);
+}
+
+/**
+ * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+       return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+{
+       return nla_put(skb, attrtype, sizeof(__le16), &value);
+}
+
 /**
  * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
@@ -783,7 +802,40 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
 }
 
 /**
- * nla_put_64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+       return nla_put(skb, attrtype, sizeof(__be32), &value);
+}
+
+/**
+ * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+       return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+{
+       return nla_put(skb, attrtype, sizeof(__le32), &value);
+}
+
+/**
+ * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
@@ -793,6 +845,39 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
        return nla_put(skb, attrtype, sizeof(u64), &value);
 }
 
+/**
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+       return nla_put(skb, attrtype, sizeof(__be64), &value);
+}
+
+/**
+ * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+       return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+{
+       return nla_put(skb, attrtype, sizeof(__le64), &value);
+}
+
 /**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
@@ -828,60 +913,6 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
        return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
-#define NLA_PUT(skb, attrtype, attrlen, data) \
-       do { \
-               if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
-                       goto nla_put_failure; \
-       } while(0)
-
-#define NLA_PUT_TYPE(skb, type, attrtype, value) \
-       do { \
-               type __tmp = value; \
-               NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
-       } while(0)
-
-#define NLA_PUT_U8(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, u8, attrtype, value)
-
-#define NLA_PUT_U16(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, u16, attrtype, value)
-
-#define NLA_PUT_LE16(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, __le16, attrtype, value)
-
-#define NLA_PUT_BE16(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, __be16, attrtype, value)
-
-#define NLA_PUT_NET16(skb, attrtype, value) \
-       NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_U32(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, u32, attrtype, value)
-
-#define NLA_PUT_BE32(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, __be32, attrtype, value)
-
-#define NLA_PUT_NET32(skb, attrtype, value) \
-       NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_U64(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, u64, attrtype, value)
-
-#define NLA_PUT_BE64(skb, attrtype, value) \
-       NLA_PUT_TYPE(skb, __be64, attrtype, value)
-
-#define NLA_PUT_NET64(skb, attrtype, value) \
-       NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_STRING(skb, attrtype, value) \
-       NLA_PUT(skb, attrtype, strlen(value) + 1, value)
-
-#define NLA_PUT_FLAG(skb, attrtype) \
-       NLA_PUT(skb, attrtype, 0, NULL)
-
-#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
-       NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
-
 /**
  * nla_get_u32 - return payload of u32 attribute
  * @nla: u32 netlink attribute
index 96239e78e621fa0654d9e436d8cbe50b8ebff5d0..1cb32bf107de65016cc43ab4f2f59085aeff3879 100644 (file)
@@ -1682,8 +1682,9 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
 
 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
 {
-       if (m->m | m->v)
-               NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
+       if ((m->m | m->v) &&
+           nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 50711368ad6a964726de2b284f84ca7d77468759..708c80ea1874fe7d827e086bdf18c57afe39db03 100644 (file)
@@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct nlattr *nest;
        unsigned int i;
 
-       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
+       if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+               goto nla_put_failure;
        if (vlan->flags) {
                f.flags = vlan->flags;
                f.mask  = ~0;
-               NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
+               if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
+                       goto nla_put_failure;
        }
        if (vlan->nr_ingress_mappings) {
                nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
@@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
                        m.from = i;
                        m.to   = vlan->ingress_priority_map[i];
-                       NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
-                               sizeof(m), &m);
+                       if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+                                   sizeof(m), &m))
+                               goto nla_put_failure;
                }
                nla_nest_end(skb, nest);
        }
@@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
                                m.from = pm->priority;
                                m.to   = (pm->vlan_qos >> 13) & 0x7;
-                               NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
-                                       sizeof(m), &m);
+                               if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+                                           sizeof(m), &m))
+                                       goto nla_put_failure;
                        }
                }
                nla_nest_end(skb, nest);
index bfa9ab93eda566458dcf6fdf72646009e0748548..0301b328cf0fe04cf39f302ab6061bdbc288c42b 100644 (file)
@@ -63,7 +63,7 @@
 #include <net/tcp_states.h>
 #include <net/route.h>
 #include <linux/atalk.h>
-#include "../core/kmap_skb.h"
+#include <linux/highmem.h>
 
 struct datalink_proto *ddp_dl, *aarp_dl;
 static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
                        if (copy > len)
                                copy = len;
-                       vaddr = kmap_skb_frag(frag);
+                       vaddr = kmap_atomic(skb_frag_page(frag));
                        sum = atalk_sum_partial(vaddr + frag->page_offset +
                                                  offset - start, copy, sum);
-                       kunmap_skb_frag(vaddr);
+                       kunmap_atomic(vaddr);
 
                        if (!(len -= copy))
                                return sum;
index 2b68d068eaf312ee9bda1de84fc0cc8e529a4a77..53f5244e28f809a83b3cc3abca10ae5b61f0324e 100644 (file)
@@ -7,19 +7,28 @@ config BATMAN_ADV
        depends on NET
        select CRC16
         default n
-       ---help---
+       help
+          B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
+          a routing protocol for multi-hop ad-hoc mesh networks. The
+          networks may be wired or wireless. See
+          http://www.open-mesh.org/ for more information and user space
+          tools.
 
-        B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
-        a routing protocol for multi-hop ad-hoc mesh networks. The
-        networks may be wired or wireless. See
-        http://www.open-mesh.org/ for more information and user space
-        tools.
+config BATMAN_ADV_BLA
+       bool "Bridge Loop Avoidance"
+       depends on BATMAN_ADV && INET
+       default y
+       help
+         This option enables BLA (Bridge Loop Avoidance), a mechanism
+         to avoid Ethernet frames looping when mesh nodes are connected
+         to both the same LAN and the same mesh. If you will never use
+         more than one mesh node in the same LAN, you can safely remove
+         this feature and save some space.
 
 config BATMAN_ADV_DEBUG
        bool "B.A.T.M.A.N. debugging"
-       depends on BATMAN_ADV != n
-       ---help---
-
+       depends on BATMAN_ADV
+       help
          This is an option for use by developers; most people should
          say N here. This enables compilation of support for
          outputting debugging information to the kernel log. The
index 4e392ebedb6459ee21c2108b9d850661b503c710..6d5c1940667dd84a4339b40c25b02df83570b946 100644 (file)
@@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o
 batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bat_sysfs.o
 batman-adv-y += bitarray.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
index c3b0548b175d3a77b7c3dbad84b4ed311ea2a291..916380c73ab76013c80bfe9420fa8f572c199571 100644 (file)
@@ -32,6 +32,7 @@
 #include "soft-interface.h"
 #include "vis.h"
 #include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"
 
 static struct dentry *bat_debugfs;
 
@@ -238,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file)
        return single_open(file, gw_client_seq_print_text, net_dev);
 }
 
-static int softif_neigh_open(struct inode *inode, struct file *file)
+static int transtable_global_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
-       return single_open(file, softif_neigh_seq_print_text, net_dev);
+       return single_open(file, tt_global_seq_print_text, net_dev);
 }
 
-static int transtable_global_open(struct inode *inode, struct file *file)
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int bla_claim_table_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
-       return single_open(file, tt_global_seq_print_text, net_dev);
+       return single_open(file, bla_claim_table_seq_print_text, net_dev);
 }
+#endif
 
 static int transtable_local_open(struct inode *inode, struct file *file)
 {
@@ -282,16 +285,20 @@ struct bat_debuginfo bat_debuginfo_##_name = {    \
 static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
 static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
 static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
 static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
+#endif
 static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
 static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
 
 static struct bat_debuginfo *mesh_debuginfos[] = {
        &bat_debuginfo_originators,
        &bat_debuginfo_gateways,
-       &bat_debuginfo_softif_neigh,
        &bat_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+       &bat_debuginfo_bla_claim_table,
+#endif
        &bat_debuginfo_transtable_local,
        &bat_debuginfo_vis_data,
        NULL,
index a6d5d63fb6ad9da8c7f82e87a44a2cb6f2b01935..fab1071f601e2f3ee8177f50114ade7a78f1e5b1 100644 (file)
@@ -850,9 +850,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        hlist_for_each_entry_rcu(tmp_neigh_node, node,
                                 &orig_node->neigh_list, list) {
 
-               is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
-                                              orig_node->last_real_seqno,
-                                              batman_ogm_packet->seqno);
+               is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
+                                            orig_node->last_real_seqno,
+                                            batman_ogm_packet->seqno);
 
                if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
                    (tmp_neigh_node->if_incoming == if_incoming))
@@ -866,7 +866,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                                              seq_diff, set_mark);
 
                tmp_neigh_node->real_packet_count =
-                       bit_packet_count(tmp_neigh_node->real_bits);
+                       bitmap_weight(tmp_neigh_node->real_bits,
+                                     TQ_LOCAL_WINDOW_SIZE);
        }
        rcu_read_unlock();
 
@@ -998,11 +999,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
 
                        spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
                        word = &(orig_neigh_node->bcast_own[offset]);
-                       bit_mark(word,
-                                if_incoming_seqno -
+                       bat_set_bit(word,
+                                   if_incoming_seqno -
                                                batman_ogm_packet->seqno - 2);
                        orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
-                               bit_packet_count(word);
+                               bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
                        spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
                }
 
index 68ff759fc3048d7a3f8031cf4a983eeafaf0bfa8..c6efd687ca7585118b7708f2c12dcff23ca76bee 100644 (file)
@@ -386,6 +386,9 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
 
 BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
 BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
 BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
 BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
 static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
@@ -398,12 +401,15 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
 static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
                store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
+BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
 #endif
 
 static struct bat_attribute *mesh_attrs[] = {
        &bat_attr_aggregated_ogms,
        &bat_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+       &bat_attr_bridge_loop_avoidance,
+#endif
        &bat_attr_fragmentation,
        &bat_attr_ap_isolation,
        &bat_attr_vis_mode,
index 6d0aa216b23214f55a257990cdabb0dc8738b1bd..07ae6e1b8aca9c465998eb7da2c914c9b7402cb5 100644 (file)
 
 #include <linux/bitops.h>
 
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-                  uint32_t curr_seqno)
-{
-       int32_t diff, word_offset, word_num;
-
-       diff = last_seqno - curr_seqno;
-       if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
-               return 0;
-       } else {
-               /* which word */
-               word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
-               /* which position in the selected word */
-               word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
-
-               if (test_bit(word_offset, &seq_bits[word_num]))
-                       return 1;
-               else
-                       return 0;
-       }
-}
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n)
-{
-       int32_t word_offset, word_num;
-
-       /* if too old, just drop it */
-       if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
-               return;
-
-       /* which word */
-       word_num = n / WORD_BIT_SIZE;
-       /* which position in the selected word */
-       word_offset = n % WORD_BIT_SIZE;
-
-       set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
-}
-
 /* shift the packet array by n places. */
-static void bit_shift(unsigned long *seq_bits, int32_t n)
+static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
 {
-       int32_t word_offset, word_num;
-       int32_t i;
-
        if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
                return;
 
-       word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
-       word_num = n / WORD_BIT_SIZE;   /* shift over how much (full) words */
-
-       for (i = NUM_WORDS - 1; i > word_num; i--) {
-               /* going from old to new, so we don't overwrite the data we copy
-                * from.
-                *
-                * left is high, right is low: FEDC BA98 7654 3210
-                *                                        ^^ ^^
-                *                             vvvv
-                * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
-                * word_offset==WORD_BIT_SIZE/2 ????? in this example.
-                * (=24 bits)
-                *
-                * our desired output would be: 9876 5432 1000 0000
-                * */
-
-               seq_bits[i] =
-                       (seq_bits[i - word_num] << word_offset) +
-                       /* take the lower port from the left half, shift it left
-                        * to its final position */
-                       (seq_bits[i - word_num - 1] >>
-                        (WORD_BIT_SIZE-word_offset));
-               /* and the upper part of the right half and shift it left to
-                * its position */
-               /* for our example that would be: word[0] = 9800 + 0076 =
-                * 9876 */
-       }
-       /* now for our last word, i==word_num, we only have its "left" half.
-        * that's the 1000 word in our example.*/
-
-       seq_bits[i] = (seq_bits[i - word_num] << word_offset);
-
-       /* pad the rest with 0, if there is anything */
-       i--;
-
-       for (; i >= 0; i--)
-               seq_bits[i] = 0;
-}
-
-static void bit_reset_window(unsigned long *seq_bits)
-{
-       int i;
-       for (i = 0; i < NUM_WORDS; i++)
-               seq_bits[i] = 0;
+       bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
 }
 
 
@@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
 
        if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
                if (set_mark)
-                       bit_mark(seq_bits, -seq_num_diff);
+                       bat_set_bit(seq_bits, -seq_num_diff);
                return 0;
        }
 
@@ -145,10 +58,10 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
         * set the mark if required */
 
        if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
-               bit_shift(seq_bits, seq_num_diff);
+               bat_bitmap_shift_left(seq_bits, seq_num_diff);
 
                if (set_mark)
-                       bit_mark(seq_bits, 0);
+                       bat_set_bit(seq_bits, 0);
                return 1;
        }
 
@@ -159,9 +72,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
                bat_dbg(DBG_BATMAN, bat_priv,
                        "We missed a lot of packets (%i) !\n",
                        seq_num_diff - 1);
-               bit_reset_window(seq_bits);
+               bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
                if (set_mark)
-                       bit_mark(seq_bits, 0);
+                       bat_set_bit(seq_bits, 0);
                return 1;
        }
 
@@ -176,9 +89,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Other host probably restarted!\n");
 
-               bit_reset_window(seq_bits);
+               bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
                if (set_mark)
-                       bit_mark(seq_bits, 0);
+                       bat_set_bit(seq_bits, 0);
 
                return 1;
        }
@@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
        /* never reached */
        return 0;
 }
-
-/* count the hamming weight, how many good packets did we receive? just count
- * the 1's.
- */
-int bit_packet_count(const unsigned long *seq_bits)
-{
-       int i, hamming = 0;
-
-       for (i = 0; i < NUM_WORDS; i++)
-               hamming += hweight_long(seq_bits[i]);
-
-       return hamming;
-}
index c6135728a680f853a00f161a823c0b23574a7c3d..1835c15cda411d5338e03dbff1c32fb9dd35ab20 100644 (file)
 #ifndef _NET_BATMAN_ADV_BITARRAY_H_
 #define _NET_BATMAN_ADV_BITARRAY_H_
 
-#define WORD_BIT_SIZE (sizeof(unsigned long) * 8)
-
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-                  uint32_t curr_seqno);
+static inline int bat_test_bit(const unsigned long *seq_bits,
+                              uint32_t last_seqno, uint32_t curr_seqno)
+{
+       int32_t diff;
+
+       diff = last_seqno - curr_seqno;
+       if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+               return 0;
+       else
+               return  test_bit(diff, seq_bits);
+}
 
 /* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n);
+static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+{
+       /* if too old, just drop it */
+       if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+               return;
 
+       set_bit(n, seq_bits); /* turn the position on */
+}
 
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old  */
 int bit_get_packet(void *priv, unsigned long *seq_bits,
                   int32_t seq_num_diff, int set_mark);
 
-/* count the hamming weight, how many good packets did we receive? */
-int bit_packet_count(const unsigned long *seq_bits);
-
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
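The rewrite above leans on the kernel's generic bitmap helpers; the following self-contained sketch shows the same sliding-window idea, with WINDOW_SIZE standing in for TQ_LOCAL_WINDOW_SIZE and a plain static bitmap standing in for a neighbor's real_bits array.

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define WINDOW_SIZE 64

static DECLARE_BITMAP(example_window, WINDOW_SIZE);

static void example_window_advance(int seq_diff)
{
	if (seq_diff <= 0 || seq_diff >= WINDOW_SIZE) {
		/* too big a jump: restart with an empty window */
		bitmap_zero(example_window, WINDOW_SIZE);
	} else {
		/* age the history by seq_diff positions, dropping old bits */
		bitmap_shift_left(example_window, example_window,
				  seq_diff, WINDOW_SIZE);
	}
	/* record the newest sequence number as received */
	set_bit(0, example_window);
}

static int example_window_packet_count(void)
{
	/* hamming weight == packets seen inside the window */
	return bitmap_weight(example_window, WINDOW_SIZE);
}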
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644 (file)
index 0000000..1cf18ac
--- /dev/null
@@ -0,0 +1,1583 @@
+/*
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "hash.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "bridge_loop_avoidance.h"
+#include "translation-table.h"
+#include "send.h"
+
+#include <linux/etherdevice.h>
+#include <linux/crc16.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+
+static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+
+static void bla_periodic_work(struct work_struct *work);
+static void bla_send_announce(struct bat_priv *bat_priv,
+                             struct backbone_gw *backbone_gw);
+
+/* return the index of the claim */
+static inline uint32_t choose_claim(const void *data, uint32_t size)
+{
+       const unsigned char *key = data;
+       uint32_t hash = 0;
+       size_t i;
+
+       for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+               hash += key[i];
+               hash += (hash << 10);
+               hash ^= (hash >> 6);
+       }
+
+       hash += (hash << 3);
+       hash ^= (hash >> 11);
+       hash += (hash << 15);
+
+       return hash % size;
+}
+
+/* return the index of the backbone gateway */
+static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+{
+       const unsigned char *key = data;
+       uint32_t hash = 0;
+       size_t i;
+
+       for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+               hash += key[i];
+               hash += (hash << 10);
+               hash ^= (hash >> 6);
+       }
+
+       hash += (hash << 3);
+       hash ^= (hash >> 11);
+       hash += (hash << 15);
+
+       return hash % size;
+}
+
+
+/* compares address and vid of two backbone gws */
+static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+{
+       const void *data1 = container_of(node, struct backbone_gw,
+                                        hash_entry);
+
+       return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* compares address and vid of two claims */
+static int compare_claim(const struct hlist_node *node, const void *data2)
+{
+       const void *data1 = container_of(node, struct claim,
+                                        hash_entry);
+
+       return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* free a backbone gw */
+static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+{
+       if (atomic_dec_and_test(&backbone_gw->refcount))
+               kfree_rcu(backbone_gw, rcu);
+}
+
+/* finally deinitialize the claim */
+static void claim_free_rcu(struct rcu_head *rcu)
+{
+       struct claim *claim;
+
+       claim = container_of(rcu, struct claim, rcu);
+
+       backbone_gw_free_ref(claim->backbone_gw);
+       kfree(claim);
+}
+
+/* free a claim, call claim_free_rcu if it's the last reference */
+static void claim_free_ref(struct claim *claim)
+{
+       if (atomic_dec_and_test(&claim->refcount))
+               call_rcu(&claim->rcu, claim_free_rcu);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: search data (may be local/static data)
+ *
+ * looks for a claim in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct claim *claim_hash_find(struct bat_priv *bat_priv,
+                                    struct claim *data)
+{
+       struct hashtable_t *hash = bat_priv->claim_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct claim *claim;
+       struct claim *claim_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_claim(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+               if (!compare_claim(&claim->hash_entry, data))
+                       continue;
+
+               if (!atomic_inc_not_zero(&claim->refcount))
+                       continue;
+
+               claim_tmp = claim;
+               break;
+       }
+       rcu_read_unlock();
+
+       return claim_tmp;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ * @vid: the VLAN ID
+ *
+ * looks for a backbone gateway in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
+                                             uint8_t *addr, short vid)
+{
+       struct hashtable_t *hash = bat_priv->backbone_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct backbone_gw search_entry, *backbone_gw;
+       struct backbone_gw *backbone_gw_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       memcpy(search_entry.orig, addr, ETH_ALEN);
+       search_entry.vid = vid;
+
+       index = choose_backbone_gw(&search_entry, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+               if (!compare_backbone_gw(&backbone_gw->hash_entry,
+                                        &search_entry))
+                       continue;
+
+               if (!atomic_inc_not_zero(&backbone_gw->refcount))
+                       continue;
+
+               backbone_gw_tmp = backbone_gw;
+               break;
+       }
+       rcu_read_unlock();
+
+       return backbone_gw_tmp;
+}
+
+/* delete all claims for a backbone */
+static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+{
+       struct hashtable_t *hash;
+       struct hlist_node *node, *node_tmp;
+       struct hlist_head *head;
+       struct claim *claim;
+       int i;
+       spinlock_t *list_lock;  /* protects write access to the hash lists */
+
+       hash = backbone_gw->bat_priv->claim_hash;
+       if (!hash)
+               return;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
+
+               spin_lock_bh(list_lock);
+               hlist_for_each_entry_safe(claim, node, node_tmp,
+                                         head, hash_entry) {
+
+                       if (claim->backbone_gw != backbone_gw)
+                               continue;
+
+                       claim_free_ref(claim);
+                       hlist_del_rcu(node);
+               }
+               spin_unlock_bh(list_lock);
+       }
+
+       /* all claims gone, initialize CRC */
+       backbone_gw->crc = BLA_CRC_INIT;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the mac address to be announced within the claim
+ * @vid: the VLAN ID
+ * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
+ *
+ * sends a claim frame according to the provided info.
+ */
+static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
+                          short vid, int claimtype)
+{
+       struct sk_buff *skb;
+       struct ethhdr *ethhdr;
+       struct hard_iface *primary_if;
+       struct net_device *soft_iface;
+       uint8_t *hw_src;
+       struct bla_claim_dst local_claim_dest;
+       uint32_t zeroip = 0;
+
+       primary_if = primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               return;
+
+       memcpy(&local_claim_dest, &bat_priv->claim_dest,
+              sizeof(local_claim_dest));
+       local_claim_dest.type = claimtype;
+
+       soft_iface = primary_if->soft_iface;
+
+       skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+                        /* IP DST: 0.0.0.0 */
+                        zeroip,
+                        primary_if->soft_iface,
+                        /* IP SRC: 0.0.0.0 */
+                        zeroip,
+                        /* Ethernet DST: Broadcast */
+                        NULL,
+                        /* Ethernet SRC/HW SRC:  originator mac */
+                        primary_if->net_dev->dev_addr,
+                        /* HW DST: FF:43:05:XX:YY:YY
+                         * with XX   = claim type
+                         * and YY:YY = group id
+                         */
+                        (uint8_t *)&local_claim_dest);
+
+       if (!skb)
+               goto out;
+
+       ethhdr = (struct ethhdr *)skb->data;
+       hw_src = (uint8_t *)ethhdr +
+                sizeof(struct ethhdr) +
+                sizeof(struct arphdr);
+
+       /* now we pretend that the client would have sent this ... */
+       switch (claimtype) {
+       case CLAIM_TYPE_ADD:
+               /* normal claim frame
+                * set Ethernet SRC to the clients mac
+                */
+               memcpy(ethhdr->h_source, mac, ETH_ALEN);
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+               break;
+       case CLAIM_TYPE_DEL:
+               /* unclaim frame
+                * set HW SRC to the clients mac
+                */
+               memcpy(hw_src, mac, ETH_ALEN);
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+               break;
+       case CLAIM_TYPE_ANNOUNCE:
+               /* announcement frame
+                * set HW SRC to the special mac containing the crc
+                */
+               memcpy(hw_src, mac, ETH_ALEN);
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+                       ethhdr->h_source, vid);
+               break;
+       case CLAIM_TYPE_REQUEST:
+               /* request frame
+                * set HW SRC to the special mac containing the crc
+                */
+               memcpy(hw_src, mac, ETH_ALEN);
+               memcpy(ethhdr->h_dest, mac, ETH_ALEN);
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
+                       ethhdr->h_source, ethhdr->h_dest, vid);
+               break;
+
+       }
+
+       if (vid != -1)
+               skb = vlan_insert_tag(skb, vid);
+
+       skb_reset_mac_header(skb);
+       skb->protocol = eth_type_trans(skb, soft_iface);
+       bat_priv->stats.rx_packets++;
+       bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
+       soft_iface->last_rx = jiffies;
+
+       netif_rx(skb);
+out:
+       if (primary_if)
+               hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the mac address of the originator
+ * @vid: the VLAN ID
+ *
+ * searches for the backbone gw or creates a new one if it could not
+ * be found.
+ */
+static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
+                                              uint8_t *orig, short vid)
+{
+       struct backbone_gw *entry;
+       struct orig_node *orig_node;
+       int hash_added;
+
+       entry = backbone_hash_find(bat_priv, orig, vid);
+
+       if (entry)
+               return entry;
+
+       bat_dbg(DBG_BLA, bat_priv,
+               "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+               orig, vid);
+
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+       if (!entry)
+               return NULL;
+
+       entry->vid = vid;
+       entry->lasttime = jiffies;
+       entry->crc = BLA_CRC_INIT;
+       entry->bat_priv = bat_priv;
+       atomic_set(&entry->request_sent, 0);
+       memcpy(entry->orig, orig, ETH_ALEN);
+
+       /* one for the hash, one for returning */
+       atomic_set(&entry->refcount, 2);
+
+       hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
+                             choose_backbone_gw, entry, &entry->hash_entry);
+
+       if (unlikely(hash_added != 0)) {
+               /* hash failed, free the structure */
+               kfree(entry);
+               return NULL;
+       }
+
+       /* this is a gateway now, remove any tt entries */
+       orig_node = orig_hash_find(bat_priv, orig);
+       if (orig_node) {
+               tt_global_del_orig(bat_priv, orig_node,
+                                  "became a backbone gateway");
+               orig_node_free_ref(orig_node);
+       }
+       return entry;
+}
+
+/* update or add the own backbone gw to make sure we announce
+ * where we receive other backbone gws
+ */
+static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
+                                      struct hard_iface *primary_if,
+                                      short vid)
+{
+       struct backbone_gw *backbone_gw;
+
+       backbone_gw = bla_get_backbone_gw(bat_priv,
+                                         primary_if->net_dev->dev_addr, vid);
+       if (unlikely(!backbone_gw))
+               return;
+
+       backbone_gw->lasttime = jiffies;
+       backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the vid where the request came on
+ *
+ * Repeat all of our own claims, and finally send an ANNOUNCE frame
+ * to allow the requester another check if the CRC is correct now.
+ */
+static void bla_answer_request(struct bat_priv *bat_priv,
+                              struct hard_iface *primary_if, short vid)
+{
+       struct hlist_node *node;
+       struct hlist_head *head;
+       struct hashtable_t *hash;
+       struct claim *claim;
+       struct backbone_gw *backbone_gw;
+       int i;
+
+       bat_dbg(DBG_BLA, bat_priv,
+               "bla_answer_request(): received a claim request, send all of our own claims again\n");
+
+       backbone_gw = backbone_hash_find(bat_priv,
+                                        primary_if->net_dev->dev_addr, vid);
+       if (!backbone_gw)
+               return;
+
+       hash = bat_priv->claim_hash;
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+                       /* only own claims are interesting */
+                       if (claim->backbone_gw != backbone_gw)
+                               continue;
+
+                       bla_send_claim(bat_priv, claim->addr, claim->vid,
+                                      CLAIM_TYPE_ADD);
+               }
+               rcu_read_unlock();
+       }
+
+       /* finally, send an announcement frame */
+       bla_send_announce(bat_priv, backbone_gw);
+       backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @backbone_gw: the backbone gateway from which we are out of sync
+ *
+ * When the crc is wrong, ask the backbone gateway for a full table update.
+ * After the request, it will repeat all of its own claims and finally
+ * send an announcement claim with which we can check again.
+ */
+static void bla_send_request(struct backbone_gw *backbone_gw)
+{
+       /* first, remove all old entries */
+       bla_del_backbone_claims(backbone_gw);
+
+       bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+               "Sending REQUEST to %pM\n",
+               backbone_gw->orig);
+
+       /* send request */
+       bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+                      backbone_gw->vid, CLAIM_TYPE_REQUEST);
+
+       /* no local broadcasts should be sent or received, for now. */
+       if (!atomic_read(&backbone_gw->request_sent)) {
+               atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+               atomic_set(&backbone_gw->request_sent, 1);
+       }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: our backbone gateway which should be announced
+ *
+ * This function sends an announcement. It is called from multiple
+ * places.
+ */
+static void bla_send_announce(struct bat_priv *bat_priv,
+                             struct backbone_gw *backbone_gw)
+{
+       uint8_t mac[ETH_ALEN];
+       uint16_t crc;
+
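+       /* the announce "client" address is the 4-byte announce_mac prefix
+        * followed by the 2-byte backbone claim CRC in network byte order
+        */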
+       memcpy(mac, announce_mac, 4);
+       crc = htons(backbone_gw->crc);
+       memcpy(&mac[4], (uint8_t *)&crc, 2);
+
+       bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address of the claim
+ * @vid: the VLAN ID of the frame
+ * @backbone_gw: the backbone gateway which claims it
+ *
+ * Adds a claim in the claim hash.
+ */
+static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+                         const short vid, struct backbone_gw *backbone_gw)
+{
+       struct claim *claim;
+       struct claim search_claim;
+       int hash_added;
+
+       memcpy(search_claim.addr, mac, ETH_ALEN);
+       search_claim.vid = vid;
+       claim = claim_hash_find(bat_priv, &search_claim);
+
+       /* create a new claim entry if it does not exist yet. */
+       if (!claim) {
+               claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
+               if (!claim)
+                       return;
+
+               memcpy(claim->addr, mac, ETH_ALEN);
+               claim->vid = vid;
+               claim->lasttime = jiffies;
+               claim->backbone_gw = backbone_gw;
+
+               atomic_set(&claim->refcount, 2);
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+                       mac, vid);
+               hash_added = hash_add(bat_priv->claim_hash, compare_claim,
+                                     choose_claim, claim, &claim->hash_entry);
+
+               if (unlikely(hash_added != 0)) {
+                       /* hash failed, free the structure */
+                       kfree(claim);
+                       return;
+               }
+       } else {
+               claim->lasttime = jiffies;
+               if (claim->backbone_gw == backbone_gw)
+                       /* no need to register a new backbone */
+                       goto claim_free_ref;
+
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_add_claim(): changing ownership for %pM, vid %d\n",
+                       mac, vid);
+
+               claim->backbone_gw->crc ^=
+                       crc16(0, claim->addr, ETH_ALEN);
+               backbone_gw_free_ref(claim->backbone_gw);
+       }
+       /* set (new) backbone gw */
+       atomic_inc(&backbone_gw->refcount);
+       claim->backbone_gw = backbone_gw;
+
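+       /* the backbone CRC is kept as a running XOR of the crc16 of all
+        * claimed client addresses, so adding or removing a claim simply
+        * toggles its contribution
+        */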
+       backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+       backbone_gw->lasttime = jiffies;
+
+claim_free_ref:
+       claim_free_ref(claim);
+}
+
+/* Delete a claim from the claim hash which has the
+ * given mac address and vid.
+ */
+static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+                         const short vid)
+{
+       struct claim search_claim, *claim;
+
+       memcpy(search_claim.addr, mac, ETH_ALEN);
+       search_claim.vid = vid;
+       claim = claim_hash_find(bat_priv, &search_claim);
+       if (!claim)
+               return;
+
+       bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+
+       claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+
+       hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
+       claim_free_ref(claim); /* reference from the hash is gone */
+
+       /* don't need the reference from hash_find() anymore */
+       claim_free_ref(claim);
+}
+
+/* check for ANNOUNCE frame, return 1 if handled */
+static int handle_announce(struct bat_priv *bat_priv,
+                          uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+{
+       struct backbone_gw *backbone_gw;
+       uint16_t crc;
+
+       if (memcmp(an_addr, announce_mac, 4) != 0)
+               return 0;
+
+       backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+       if (unlikely(!backbone_gw))
+               return 1;
+
+       /* handle as ANNOUNCE frame */
+       backbone_gw->lasttime = jiffies;
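+       /* bytes 4 and 5 of the announce address carry the claim CRC of the
+        * sending backbone gateway (see bla_send_announce())
+        */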
+       crc = ntohs(*((uint16_t *)(&an_addr[4])));
+
+       bat_dbg(DBG_BLA, bat_priv,
+               "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+               vid, backbone_gw->orig, crc);
+
+       if (backbone_gw->crc != crc) {
+               bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+                       "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+                       backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
+                       crc);
+
+               bla_send_request(backbone_gw);
+       } else {
+               /* if we have sent a request and the crc was OK,
+                * we can allow traffic again.
+                */
+               if (atomic_read(&backbone_gw->request_sent)) {
+                       atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+                       atomic_set(&backbone_gw->request_sent, 0);
+               }
+       }
+
+       backbone_gw_free_ref(backbone_gw);
+       return 1;
+}
+
+/* check for REQUEST frame, return 1 if handled */
+static int handle_request(struct bat_priv *bat_priv,
+                         struct hard_iface *primary_if,
+                         uint8_t *backbone_addr,
+                         struct ethhdr *ethhdr, short vid)
+{
+       /* check for REQUEST frame */
+       if (!compare_eth(backbone_addr, ethhdr->h_dest))
+               return 0;
+
+       /* sanity check, this should not happen on a normal switch,
+        * we ignore it in this case.
+        */
+       if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+               return 1;
+
+       bat_dbg(DBG_BLA, bat_priv,
+               "handle_request(): REQUEST vid %d (sent by %pM)...\n",
+               vid, ethhdr->h_source);
+
+       bla_answer_request(bat_priv, primary_if, vid);
+       return 1;
+}
+
+/* check for UNCLAIM frame, return 1 if handled */
+static int handle_unclaim(struct bat_priv *bat_priv,
+                         struct hard_iface *primary_if,
+                         uint8_t *backbone_addr,
+                         uint8_t *claim_addr, short vid)
+{
+       struct backbone_gw *backbone_gw;
+
+       /* unclaim in any case if it is our own */
+       if (primary_if && compare_eth(backbone_addr,
+                                     primary_if->net_dev->dev_addr))
+               bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+
+       backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+
+       if (!backbone_gw)
+               return 1;
+
+       /* this must be an UNCLAIM frame */
+       bat_dbg(DBG_BLA, bat_priv,
+               "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+               claim_addr, vid, backbone_gw->orig);
+
+       bla_del_claim(bat_priv, claim_addr, vid);
+       backbone_gw_free_ref(backbone_gw);
+       return 1;
+}
+
+/* check for CLAIM frame, return 1 if handled */
+static int handle_claim(struct bat_priv *bat_priv,
+                       struct hard_iface *primary_if, uint8_t *backbone_addr,
+                       uint8_t *claim_addr, short vid)
+{
+       struct backbone_gw *backbone_gw;
+
+       /* register the gateway if not yet available, and add the claim. */
+
+       backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+       if (unlikely(!backbone_gw))
+               return 1;
+
+       /* this must be a CLAIM frame */
+       bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+       if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+               bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+
+       /* TODO: we could call something like tt_local_del() here. */
+
+       backbone_gw_free_ref(backbone_gw);
+       return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @hw_src: the Hardware source in the ARP Header
+ * @hw_dst: the Hardware destination in the ARP Header
+ * @ethhdr: pointer to the Ethernet header of the claim frame
+ *
+ * checks if it is a claim packet and if it is on the same group.
+ * This function also applies the group ID of the sender
+ * if it is in the same mesh.
+ *
+ * returns:
+ *     2  - if it is a claim packet and on the same group
+ *     1  - if it is a claim packet from another group
+ *     0  - if it is not a claim packet
+ */
+static int check_claim_group(struct bat_priv *bat_priv,
+                            struct hard_iface *primary_if,
+                            uint8_t *hw_src, uint8_t *hw_dst,
+                            struct ethhdr *ethhdr)
+{
+       uint8_t *backbone_addr;
+       struct orig_node *orig_node;
+       struct bla_claim_dst *bla_dst, *bla_dst_own;
+
+       bla_dst = (struct bla_claim_dst *)hw_dst;
+       bla_dst_own = &bat_priv->claim_dest;
+
+       /* check if it is a claim packet in general */
+       if (memcmp(bla_dst->magic, bla_dst_own->magic,
+                  sizeof(bla_dst->magic)) != 0)
+               return 0;
+
+       /* if announcement packet, use the source,
+        * otherwise assume it is in the hw_src
+        */
+       switch (bla_dst->type) {
+       case CLAIM_TYPE_ADD:
+               backbone_addr = hw_src;
+               break;
+       case CLAIM_TYPE_REQUEST:
+       case CLAIM_TYPE_ANNOUNCE:
+       case CLAIM_TYPE_DEL:
+               backbone_addr = ethhdr->h_source;
+               break;
+       default:
+               return 0;
+       }
+
+       /* don't accept claim frames from ourselves */
+       if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+               return 0;
+
+       /* if it's already the same group, it is fine. */
+       if (bla_dst->group == bla_dst_own->group)
+               return 2;
+
+       /* let's see if this originator is in our mesh */
+       orig_node = orig_hash_find(bat_priv, backbone_addr);
+
+       /* don't accept claims from gateways which are not in
+        * the same mesh or group.
+        */
+       if (!orig_node)
+               return 1;
+
+       /* if our mesh friend's group id is larger, adopt it for ourselves. */
+       if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
+               bat_dbg(DBG_BLA, bat_priv,
+                       "taking other backbones claim group: %04x\n",
+                       ntohs(bla_dst->group));
+               bla_dst_own->group = bla_dst->group;
+       }
+
+       orig_node_free_ref(orig_node);
+
+       return 2;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @skb: the frame to be checked
+ *
+ * Check if this is a claim frame, and process it accordingly.
+ *
+ * returns 1 if it was a claim frame, otherwise returns 0 to
+ * tell the caller that it can use the frame on its own.
+ */
+static int bla_process_claim(struct bat_priv *bat_priv,
+                            struct hard_iface *primary_if,
+                            struct sk_buff *skb)
+{
+       struct ethhdr *ethhdr;
+       struct vlan_ethhdr *vhdr;
+       struct arphdr *arphdr;
+       uint8_t *hw_src, *hw_dst;
+       struct bla_claim_dst *bla_dst;
+       uint16_t proto;
+       int headlen;
+       short vid = -1;
+       int ret;
+
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+               vhdr = (struct vlan_ethhdr *)ethhdr;
+               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+               proto = ntohs(vhdr->h_vlan_encapsulated_proto);
+               headlen = sizeof(*vhdr);
+       } else {
+               proto = ntohs(ethhdr->h_proto);
+               headlen = sizeof(*ethhdr);
+       }
+
+       if (proto != ETH_P_ARP)
+               return 0; /* not a claim frame */
+
+       /* this must be an ARP frame. check if it is a claim. */
+
+       if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
+               return 0;
+
+       /* pskb_may_pull() may have modified the pointers, get ethhdr again */
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+
+       /* Check whether the ARP frame carries valid
+        * IP information
+        */
+
+       if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+               return 0;
+       if (arphdr->ar_pro != htons(ETH_P_IP))
+               return 0;
+       if (arphdr->ar_hln != ETH_ALEN)
+               return 0;
+       if (arphdr->ar_pln != 4)
+               return 0;
+
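+       /* in an Ethernet/IPv4 ARP payload, the sender hardware address comes
+        * first, followed by the 4-byte sender IP; the target hardware
+        * address after that carries the bla_claim_dst (magic, type, group)
+        */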
+       hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+       hw_dst = hw_src + ETH_ALEN + 4;
+       bla_dst = (struct bla_claim_dst *)hw_dst;
+
+       /* check if it is a claim frame. */
+       ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+       if (ret == 1)
+               bat_dbg(DBG_BLA, bat_priv,
+                       "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+                       ethhdr->h_source, vid, hw_src, hw_dst);
+
+       if (ret < 2)
+               return ret;
+
+       /* become a backbone gw ourselves on this vlan if not happened yet */
+       bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+
+       /* check for the different types of claim frames ... */
+       switch (bla_dst->type) {
+       case CLAIM_TYPE_ADD:
+               if (handle_claim(bat_priv, primary_if, hw_src,
+                                ethhdr->h_source, vid))
+                       return 1;
+               break;
+       case CLAIM_TYPE_DEL:
+               if (handle_unclaim(bat_priv, primary_if,
+                                  ethhdr->h_source, hw_src, vid))
+                       return 1;
+               break;
+
+       case CLAIM_TYPE_ANNOUNCE:
+               if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+                       return 1;
+               break;
+       case CLAIM_TYPE_REQUEST:
+               if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+                       return 1;
+               break;
+       }
+
+       bat_dbg(DBG_BLA, bat_priv,
+               "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+               ethhdr->h_source, vid, hw_src, hw_dst);
+       return 1;
+}
+
+/* Check when we last heard from other nodes, and remove them in case of
+ * a time out, or clean all backbone gws if now is set.
+ */
+static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+{
+       struct backbone_gw *backbone_gw;
+       struct hlist_node *node, *node_tmp;
+       struct hlist_head *head;
+       struct hashtable_t *hash;
+       spinlock_t *list_lock;  /* protects write access to the hash lists */
+       int i;
+
+       hash = bat_priv->backbone_hash;
+       if (!hash)
+               return;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
+
+               spin_lock_bh(list_lock);
+               hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+                                         head, hash_entry) {
+                       if (now)
+                               goto purge_now;
+                       if (!has_timed_out(backbone_gw->lasttime,
+                                          BLA_BACKBONE_TIMEOUT))
+                               continue;
+
+                       bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+                               "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
+                               backbone_gw->orig);
+
+purge_now:
+                       /* don't wait for the pending request anymore */
+                       if (atomic_read(&backbone_gw->request_sent))
+                               atomic_dec(&bat_priv->bla_num_requests);
+
+                       bla_del_backbone_claims(backbone_gw);
+
+                       hlist_del_rcu(node);
+                       backbone_gw_free_ref(backbone_gw);
+               }
+               spin_unlock_bh(list_lock);
+       }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface, may be NULL if now is set
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from our own claims, and remove them in case of
+ * a time out, or clean all claims if now is set
+ */
+static void bla_purge_claims(struct bat_priv *bat_priv,
+                            struct hard_iface *primary_if, int now)
+{
+       struct claim *claim;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       struct hashtable_t *hash;
+       int i;
+
+       hash = bat_priv->claim_hash;
+       if (!hash)
+               return;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+                       if (now)
+                               goto purge_now;
+                       if (!compare_eth(claim->backbone_gw->orig,
+                                        primary_if->net_dev->dev_addr))
+                               continue;
+                       if (!has_timed_out(claim->lasttime,
+                                          BLA_CLAIM_TIMEOUT))
+                               continue;
+
+                       bat_dbg(DBG_BLA, bat_priv,
+                               "bla_purge_claims(): %pM, vid %d, time out\n",
+                               claim->addr, claim->vid);
+
+purge_now:
+                       handle_unclaim(bat_priv, primary_if,
+                                      claim->backbone_gw->orig,
+                                      claim->addr, claim->vid);
+               }
+               rcu_read_unlock();
+       }
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the new selected primary_if
+ * @oldif: the old primary interface, may be NULL
+ *
+ * Update the backbone gateways when the own orig address changes.
+ *
+ */
+void bla_update_orig_address(struct bat_priv *bat_priv,
+                            struct hard_iface *primary_if,
+                            struct hard_iface *oldif)
+{
+       struct backbone_gw *backbone_gw;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       struct hashtable_t *hash;
+       int i;
+
+       /* reset bridge loop avoidance group id */
+       bat_priv->claim_dest.group =
+               htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+
+       if (!oldif) {
+               bla_purge_claims(bat_priv, NULL, 1);
+               bla_purge_backbone_gw(bat_priv, 1);
+               return;
+       }
+
+       hash = bat_priv->backbone_hash;
+       if (!hash)
+               return;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+                       /* own orig still holds the old value. */
+                       if (!compare_eth(backbone_gw->orig,
+                                        oldif->net_dev->dev_addr))
+                               continue;
+
+                       memcpy(backbone_gw->orig,
+                              primary_if->net_dev->dev_addr, ETH_ALEN);
+                       /* send an announce frame so others will ask for our
+                        * claims and update their tables.
+                        */
+                       bla_send_announce(bat_priv, backbone_gw);
+               }
+               rcu_read_unlock();
+       }
+}
+
+/* (re)start the timer */
+static void bla_start_timer(struct bat_priv *bat_priv)
+{
+       INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
+       queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
+                          msecs_to_jiffies(BLA_PERIOD_LENGTH));
+}
+
+/* periodic work to do:
+ *  * purge structures when they are too old
+ *  * send announcements
+ */
+static void bla_periodic_work(struct work_struct *work)
+{
+       struct delayed_work *delayed_work =
+               container_of(work, struct delayed_work, work);
+       struct bat_priv *bat_priv =
+               container_of(delayed_work, struct bat_priv, bla_work);
+       struct hlist_node *node;
+       struct hlist_head *head;
+       struct backbone_gw *backbone_gw;
+       struct hashtable_t *hash;
+       struct hard_iface *primary_if;
+       int i;
+
+       primary_if = primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out;
+
+       bla_purge_claims(bat_priv, primary_if, 0);
+       bla_purge_backbone_gw(bat_priv, 0);
+
+       if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+               goto out;
+
+       hash = bat_priv->backbone_hash;
+       if (!hash)
+               goto out;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+                       if (!compare_eth(backbone_gw->orig,
+                                        primary_if->net_dev->dev_addr))
+                               continue;
+
+                       backbone_gw->lasttime = jiffies;
+
+                       bla_send_announce(bat_priv, backbone_gw);
+               }
+               rcu_read_unlock();
+       }
+out:
+       if (primary_if)
+               hardif_free_ref(primary_if);
+
+       bla_start_timer(bat_priv);
+}
+
+/* initialize all bla structures */
+int bla_init(struct bat_priv *bat_priv)
+{
+       int i;
+       uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+       struct hard_iface *primary_if;
+
+       bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+
+       /* setting claim destination address */
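+       /* the claim destination is transported in the ARP target hardware
+        * address: the 3-byte magic FF:43:05, the claim type and the 2-byte
+        * group id (derived from the crc16 of the primary MAC below)
+        */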
+       memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
+       bat_priv->claim_dest.type = 0;
+       primary_if = primary_if_get_selected(bat_priv);
+       if (primary_if) {
+               bat_priv->claim_dest.group =
+                       htons(crc16(0, primary_if->net_dev->dev_addr,
+                                   ETH_ALEN));
+               hardif_free_ref(primary_if);
+       } else {
+               bat_priv->claim_dest.group = 0; /* will be set later */
+       }
+
+       /* initialize the duplicate list */
+       for (i = 0; i < DUPLIST_SIZE; i++)
+               bat_priv->bcast_duplist[i].entrytime =
+                       jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+       bat_priv->bcast_duplist_curr = 0;
+
+       if (bat_priv->claim_hash)
+               return 1;
+
+       bat_priv->claim_hash = hash_new(128);
+       bat_priv->backbone_hash = hash_new(32);
+
+       if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+               return -1;
+
+       bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+       bla_start_timer(bat_priv);
+       return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bcast_packet: the broadcast packet to be checked
+ * @hdr_size: maximum length of the frame
+ *
+ * check if it is on our broadcast list. Another gateway might
+ * have sent the same packet because it is connected to the same backbone,
+ * so we have to remove this duplicate.
+ *
+ * This is performed by checking the CRC, which will tell us
+ * with a good chance that it is the same packet. If it is furthermore
+ * sent by another host, drop it. We allow equal packets from
+ * the same host however as this might be intended.
+ */
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+                           struct bcast_packet *bcast_packet,
+                           int hdr_size)
+{
+       int i, length, curr;
+       uint8_t *content;
+       uint16_t crc;
+       struct bcast_duplist_entry *entry;
+
+       length = hdr_size - sizeof(*bcast_packet);
+       content = (uint8_t *)bcast_packet;
+       content += sizeof(*bcast_packet);
+
+       /* calculate the crc ... */
+       crc = crc16(0, content, length);
+
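+       /* bcast_duplist is a ring buffer with bcast_duplist_curr pointing at
+        * the most recent entry, so the scan below runs from newest to oldest
+        * and can stop at the first entry which has timed out
+        */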
+       for (i = 0; i < DUPLIST_SIZE; i++) {
+               curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+               entry = &bat_priv->bcast_duplist[curr];
+
+               /* we can stop searching if the entry is too old;
+                * later entries will be even older
+                */
+               if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+                       break;
+
+               if (entry->crc != crc)
+                       continue;
+
+               if (compare_eth(entry->orig, bcast_packet->orig))
+                       continue;
+
+               /* this entry seems to match: same crc, not too old,
+                * and from another gw. therefore return 1 to forbid it.
+                */
+               return 1;
+       }
+       /* not found, add a new entry (overwrite the oldest entry) */
+       curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+       entry = &bat_priv->bcast_duplist[curr];
+       entry->crc = crc;
+       entry->entrytime = jiffies;
+       memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+       bat_priv->bcast_duplist_curr = curr;
+
+       /* allow it, it's the first occurrence. */
+       return 0;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ *
+ * check if the originator is a gateway for any VLAN ID.
+ *
+ * returns 1 if it is found, 0 otherwise
+ */
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+{
+       struct hashtable_t *hash = bat_priv->backbone_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct backbone_gw *backbone_gw;
+       int i;
+
+       if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+               return 0;
+
+       if (!hash)
+               return 0;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+                       if (compare_eth(backbone_gw->orig, orig)) {
+                               rcu_read_unlock();
+                               return 1;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+       return 0;
+}
+
+/**
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: the size of the batman-adv header preceding the Ethernet frame
+ *
+ * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
+ * if the orig_node is a backbone gateway for the frame's VLAN,
+ * otherwise it returns 0.
+ *
+ */
+int bla_is_backbone_gw(struct sk_buff *skb,
+                      struct orig_node *orig_node, int hdr_size)
+{
+       struct ethhdr *ethhdr;
+       struct vlan_ethhdr *vhdr;
+       struct backbone_gw *backbone_gw;
+       short vid = -1;
+
+       if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+               return 0;
+
+       /* first, find out the vid. */
+       if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
+               return 0;
+
+       ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
+
+       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+               if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+                       return 0;
+
+               vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
+                                             hdr_size);
+               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+       }
+
+       /* see if this originator is a backbone gw for this VLAN */
+
+       backbone_gw = backbone_hash_find(orig_node->bat_priv,
+                                        orig_node->orig, vid);
+       if (!backbone_gw)
+               return 0;
+
+       backbone_gw_free_ref(backbone_gw);
+       return 1;
+}
+
+/* free all bla structures (for softinterface free or module unload) */
+void bla_free(struct bat_priv *bat_priv)
+{
+       struct hard_iface *primary_if;
+
+       cancel_delayed_work_sync(&bat_priv->bla_work);
+       primary_if = primary_if_get_selected(bat_priv);
+
+       if (bat_priv->claim_hash) {
+               bla_purge_claims(bat_priv, primary_if, 1);
+               hash_destroy(bat_priv->claim_hash);
+               bat_priv->claim_hash = NULL;
+       }
+       if (bat_priv->backbone_hash) {
+               bla_purge_backbone_gw(bat_priv, 1);
+               hash_destroy(bat_priv->backbone_hash);
+               bat_priv->backbone_hash = NULL;
+       }
+       if (primary_if)
+               hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_rx checks whether:
+ *  * we have to race for a claim
+ *  * the frame is allowed on the LAN
+ *
+ * in these cases, the skb is further handled by this function and
+ * 1 is returned, otherwise 0 is returned and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+       struct ethhdr *ethhdr;
+       struct claim search_claim, *claim = NULL;
+       struct hard_iface *primary_if;
+       int ret;
+
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+       primary_if = primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto handled;
+
+       if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+               goto allow;
+
+       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+               /* don't allow broadcasts while requests are in flight */
+               if (is_multicast_ether_addr(ethhdr->h_dest))
+                       goto handled;
+
+       memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+       search_claim.vid = vid;
+       claim = claim_hash_find(bat_priv, &search_claim);
+
+       if (!claim) {
+               /* possible optimization: race for a claim */
+               /* No claim exists yet, claim it for us! */
+               handle_claim(bat_priv, primary_if,
+                            primary_if->net_dev->dev_addr,
+                            ethhdr->h_source, vid);
+               goto allow;
+       }
+
+       /* if it is our own claim ... */
+       if (compare_eth(claim->backbone_gw->orig,
+                       primary_if->net_dev->dev_addr)) {
+               /* ... allow it in any case */
+               claim->lasttime = jiffies;
+               goto allow;
+       }
+
+       /* if it is a broadcast ... */
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
+               /* ... drop it. the responsible gateway is in charge. */
+               goto handled;
+       } else {
+               /* seems the client considers us as its best gateway.
+                * send a claim and update the claim table
+                * immediately.
+                */
+               handle_claim(bat_priv, primary_if,
+                            primary_if->net_dev->dev_addr,
+                            ethhdr->h_source, vid);
+               goto allow;
+       }
+allow:
+       bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+       ret = 0;
+       goto out;
+
+handled:
+       kfree_skb(skb);
+       ret = 1;
+
+out:
+       if (primary_if)
+               hardif_free_ref(primary_if);
+       if (claim)
+               claim_free_ref(claim);
+       return ret;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_tx checks whether:
+ *  * a claim was received which has to be processed
+ *  * the frame is allowed on the mesh
+ *
+ * in these cases, the skb is further handled by this function and
+ * 1 is returned, otherwise 0 is returned and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+       struct ethhdr *ethhdr;
+       struct claim search_claim, *claim = NULL;
+       struct hard_iface *primary_if;
+       int ret = 0;
+
+       primary_if = primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out;
+
+       if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+               goto allow;
+
+       /* in VLAN case, the mac header might not be set. */
+       skb_reset_mac_header(skb);
+
+       if (bla_process_claim(bat_priv, primary_if, skb))
+               goto handled;
+
+       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+               /* don't allow broadcasts while requests are in flight */
+               if (is_multicast_ether_addr(ethhdr->h_dest))
+                       goto handled;
+
+       memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+       search_claim.vid = vid;
+
+       claim = claim_hash_find(bat_priv, &search_claim);
+
+       /* if no claim exists, allow it. */
+       if (!claim)
+               goto allow;
+
+       /* check if we are responsible. */
+       if (compare_eth(claim->backbone_gw->orig,
+                       primary_if->net_dev->dev_addr)) {
+               /* if yes, the client has roamed and we have
+                * to unclaim it.
+                */
+               handle_unclaim(bat_priv, primary_if,
+                              primary_if->net_dev->dev_addr,
+                              ethhdr->h_source, vid);
+               goto allow;
+       }
+
+       /* check if it is a multicast/broadcast frame */
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
+               /* drop it. the responsible gateway has forwarded it into
+                * the backbone network.
+                */
+               goto handled;
+       } else {
+               /* we must allow it. at least if we are
+                * responsible for the DESTINATION.
+                */
+               goto allow;
+       }
+allow:
+       bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+       ret = 0;
+       goto out;
+handled:
+       ret = 1;
+out:
+       if (primary_if)
+               hardif_free_ref(primary_if);
+       if (claim)
+               claim_free_ref(claim);
+       return ret;
+}
+
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+       struct net_device *net_dev = (struct net_device *)seq->private;
+       struct bat_priv *bat_priv = netdev_priv(net_dev);
+       struct hashtable_t *hash = bat_priv->claim_hash;
+       struct claim *claim;
+       struct hard_iface *primary_if;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       uint32_t i;
+       bool is_own;
+       int ret = 0;
+
+       primary_if = primary_if_get_selected(bat_priv);
+       if (!primary_if) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       if (primary_if->if_status != IF_ACTIVE) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - primary interface not active\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       seq_printf(seq,
+                  "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
+                  net_dev->name, primary_if->net_dev->dev_addr,
+                  ntohs(bat_priv->claim_dest.group));
+       seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
+                  "Client", "VID", "Originator", "CRC");
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+                       is_own = compare_eth(claim->backbone_gw->orig,
+                                            primary_if->net_dev->dev_addr);
+                       seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
+                                  claim->addr, claim->vid,
+                                  claim->backbone_gw->orig,
+                                  (is_own ? 'x' : ' '),
+                                  claim->backbone_gw->crc);
+               }
+               rcu_read_unlock();
+       }
+out:
+       if (primary_if)
+               hardif_free_ref(primary_if);
+       return ret;
+}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644 (file)
index 0000000..4a8e4fc
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_BLA_H_
+#define _NET_BATMAN_ADV_BLA_H_
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_is_backbone_gw(struct sk_buff *skb,
+                      struct orig_node *orig_node, int hdr_size);
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+                           struct bcast_packet *bcast_packet, int hdr_size);
+void bla_update_orig_address(struct bat_priv *bat_priv,
+                            struct hard_iface *primary_if,
+                            struct hard_iface *oldif);
+int bla_init(struct bat_priv *bat_priv);
+void bla_free(struct bat_priv *bat_priv);
+
+#define BLA_CRC_INIT   0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
+                        short vid)
+{
+       return 0;
+}
+
+static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
+                        short vid)
+{
+       return 0;
+}
+
+static inline int bla_is_backbone_gw(struct sk_buff *skb,
+                                    struct orig_node *orig_node,
+                                    int hdr_size)
+{
+       return 0;
+}
+
+static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
+                                                void *offset)
+{
+       return 0;
+}
+
+static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
+                                         uint8_t *orig)
+{
+       return 0;
+}
+
+static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+                                         struct bcast_packet *bcast_packet,
+                                         int hdr_size)
+{
+       return 0;
+}
+
+static inline void bla_update_orig_address(struct bat_priv *bat_priv,
+                                          struct hard_iface *primary_if,
+                                          struct hard_iface *oldif)
+{
+}
+
+static inline int bla_init(struct bat_priv *bat_priv)
+{
+       return 1;
+}
+
+static inline void bla_free(struct bat_priv *bat_priv)
+{
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
index 377897701a850bff63b7eb981e187f11e1db6fd6..8c4b790b98bec99792256a02b60077f972f2e752 100644 (file)
@@ -28,6 +28,7 @@
 #include "bat_sysfs.h"
 #include "originator.h"
 #include "hash.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/if_arp.h>
 
@@ -107,7 +108,8 @@ out:
        return hard_iface;
 }
 
-static void primary_if_update_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv,
+                                  struct hard_iface *oldif)
 {
        struct vis_packet *vis_packet;
        struct hard_iface *primary_if;
@@ -122,6 +124,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv)
        memcpy(vis_packet->sender_orig,
               primary_if->net_dev->dev_addr, ETH_ALEN);
 
+       bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
        if (primary_if)
                hardif_free_ref(primary_if);
@@ -140,14 +143,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
        curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
        rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
-       if (curr_hard_iface)
-               hardif_free_ref(curr_hard_iface);
-
        if (!new_hard_iface)
-               return;
+               goto out;
 
        bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
-       primary_if_update_addr(bat_priv);
+       primary_if_update_addr(bat_priv, curr_hard_iface);
+
+out:
+       if (curr_hard_iface)
+               hardif_free_ref(curr_hard_iface);
 }
 
 static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -531,7 +535,7 @@ static int hard_if_event(struct notifier_block *this,
                        goto hardif_put;
 
                if (hard_iface == primary_if)
-                       primary_if_update_addr(bat_priv);
+                       primary_if_update_addr(bat_priv, NULL);
                break;
        default:
                break;
index 6d51caaf8cecb782fcb9c4da2b50000f1546f865..e67ca96285b3396ab07a9004f95235fb6a048ddb 100644 (file)
@@ -30,6 +30,7 @@
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
+#include "bridge_loop_avoidance.h"
 #include "vis.h"
 #include "hash.h"
 #include "bat_algo.h"
@@ -96,13 +97,10 @@ int mesh_init(struct net_device *soft_iface)
        spin_lock_init(&bat_priv->gw_list_lock);
        spin_lock_init(&bat_priv->vis_hash_lock);
        spin_lock_init(&bat_priv->vis_list_lock);
-       spin_lock_init(&bat_priv->softif_neigh_lock);
-       spin_lock_init(&bat_priv->softif_neigh_vid_lock);
 
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw_list);
-       INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
        INIT_LIST_HEAD(&bat_priv->tt_changes_list);
        INIT_LIST_HEAD(&bat_priv->tt_req_list);
        INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -118,6 +116,9 @@ int mesh_init(struct net_device *soft_iface)
        if (vis_init(bat_priv) < 1)
                goto err;
 
+       if (bla_init(bat_priv) < 1)
+               goto err;
+
        atomic_set(&bat_priv->gw_reselect, 0);
        atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
        goto end;
@@ -145,7 +146,7 @@ void mesh_free(struct net_device *soft_iface)
 
        tt_free(bat_priv);
 
-       softif_neigh_purge(bat_priv);
+       bla_free(bat_priv);
 
        atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 }
index 94fa1c2393a6952fdbeeae735110e3033de9a6d9..d9832acf558dc0e2d9cd3ef29cb33c47f0df051c 100644 (file)
@@ -65,7 +65,7 @@
 
 #define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
 
-#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
+#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
 
 #define LOG_BUF_LEN 8192         /* has to be a power of 2 */
 
 #define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100
 
-#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
+#define BLA_PERIOD_LENGTH      10000   /* 10 seconds */
+#define BLA_BACKBONE_TIMEOUT   (BLA_PERIOD_LENGTH * 3)
+#define BLA_CLAIM_TIMEOUT      (BLA_PERIOD_LENGTH * 10)
 
+#define DUPLIST_SIZE           16
+#define DUPLIST_TIMEOUT                500     /* 500 ms */
 /* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE   65536
@@ -119,7 +123,8 @@ enum dbg_level {
        DBG_BATMAN = 1 << 0,
        DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
        DBG_TT     = 1 << 2, /* translation table operations */
-       DBG_ALL    = 7
+       DBG_BLA    = 1 << 3, /* bridge loop avoidance */
+       DBG_ALL    = 15
 };
 
 /* Kernel headers */
index 43c0a4f1399e9d2bfacd970193de10afcd0503f3..ce496988589498743a3ef2380b9da8418a32f94f 100644 (file)
@@ -28,6 +28,7 @@
 #include "hard-interface.h"
 #include "unicast.h"
 #include "soft-interface.h"
+#include "bridge_loop_avoidance.h"
 
 static void purge_orig(struct work_struct *work);
 
@@ -375,8 +376,6 @@ static void _purge_orig(struct bat_priv *bat_priv)
 
        gw_node_purge(bat_priv);
        gw_election(bat_priv);
-
-       softif_neigh_purge(bat_priv);
 }
 
 static void purge_orig(struct work_struct *work)
index 441f3db1bd91a3d51f1821126ce5e16e37af4c7a..59800e82371a428ab2ff501fa6f8f802af8bedb2 100644 (file)
@@ -90,6 +90,23 @@ enum tt_client_flags {
        TT_CLIENT_PENDING = 1 << 10
 };
 
+/* claim frame types for the bridge loop avoidance */
+enum bla_claimframe {
+       CLAIM_TYPE_ADD          = 0x00,
+       CLAIM_TYPE_DEL          = 0x01,
+       CLAIM_TYPE_ANNOUNCE     = 0x02,
+       CLAIM_TYPE_REQUEST      = 0x03
+};
+
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct bla_claim_dst {
+       uint8_t magic[3];       /* FF:43:05 */
+       uint8_t type;           /* bla_claimframe */
+       uint16_t group;         /* group id */
+} __packed;
+
 struct batman_header {
        uint8_t  packet_type;
        uint8_t  version;  /* batman version field */
@@ -100,8 +117,8 @@ struct batman_ogm_packet {
        struct batman_header header;
        uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
        uint32_t seqno;
-       uint8_t  orig[6];
-       uint8_t  prev_sender[6];
+       uint8_t  orig[ETH_ALEN];
+       uint8_t  prev_sender[ETH_ALEN];
        uint8_t  gw_flags;  /* flags related to gateway class */
        uint8_t  tq;
        uint8_t  tt_num_changes;
@@ -114,8 +131,8 @@ struct batman_ogm_packet {
 struct icmp_packet {
        struct batman_header header;
        uint8_t  msg_type; /* see ICMP message types above */
-       uint8_t  dst[6];
-       uint8_t  orig[6];
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  orig[ETH_ALEN];
        uint16_t seqno;
        uint8_t  uid;
        uint8_t  reserved;
@@ -128,8 +145,8 @@ struct icmp_packet {
 struct icmp_packet_rr {
        struct batman_header header;
        uint8_t  msg_type; /* see ICMP message types above */
-       uint8_t  dst[6];
-       uint8_t  orig[6];
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  orig[ETH_ALEN];
        uint16_t seqno;
        uint8_t  uid;
        uint8_t  rr_cur;
@@ -139,16 +156,16 @@ struct icmp_packet_rr {
 struct unicast_packet {
        struct batman_header header;
        uint8_t  ttvn; /* destination translation table version number */
-       uint8_t  dest[6];
+       uint8_t  dest[ETH_ALEN];
 } __packed;
 
 struct unicast_frag_packet {
        struct batman_header header;
        uint8_t  ttvn; /* destination translation table version number */
-       uint8_t  dest[6];
+       uint8_t  dest[ETH_ALEN];
        uint8_t  flags;
        uint8_t  align;
-       uint8_t  orig[6];
+       uint8_t  orig[ETH_ALEN];
        uint16_t seqno;
 } __packed;
 
@@ -156,7 +173,7 @@ struct bcast_packet {
        struct batman_header header;
        uint8_t  reserved;
        uint32_t seqno;
-       uint8_t  orig[6];
+       uint8_t  orig[ETH_ALEN];
 } __packed;
 
 struct vis_packet {
@@ -165,9 +182,9 @@ struct vis_packet {
        uint32_t seqno;          /* sequence number */
        uint8_t  entries;        /* number of entries behind this struct */
        uint8_t  reserved;
-       uint8_t  vis_orig[6];    /* originator that announces its neighbors */
-       uint8_t  target_orig[6]; /* who should receive this packet */
-       uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
+       uint8_t  vis_orig[ETH_ALEN];    /* originator reporting its neighbors */
+       uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
+       uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
 } __packed;
 
 struct tt_query_packet {
index 7f8e15899417c351c5344640682acb26a23b852a..78eddc9067e67457cbf68a3c5408178591ac2521 100644 (file)
 #include "originator.h"
 #include "vis.h"
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
+
+static int route_unicast_packet(struct sk_buff *skb,
+                               struct hard_iface *recv_if);
 
 void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
@@ -52,7 +56,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
 
                        bit_get_packet(bat_priv, word, 1, 0);
                        orig_node->bcast_own_sum[hard_iface->if_num] =
-                               bit_packet_count(word);
+                               bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);
                }
                rcu_read_unlock();
@@ -669,6 +673,13 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
        if (!is_my_mac(roam_adv_packet->dst))
                return route_unicast_packet(skb, recv_if);
 
+       /* check if it is a backbone gateway. we don't accept
+        * roaming advertisements from it, as it has the same
+        * entries as we have.
+        */
+       if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+               goto out;
+
        orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
        if (!orig_node)
                goto out;
@@ -798,7 +809,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
        return 0;
 }
 
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct orig_node *orig_node = NULL;
@@ -1047,8 +1058,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
        spin_lock_bh(&orig_node->bcast_seqno_lock);
 
        /* check whether the packet is a duplicate */
-       if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
-                          ntohl(bcast_packet->seqno)))
+       if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+                        ntohl(bcast_packet->seqno)))
                goto spin_unlock;
 
        seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
@@ -1065,9 +1076,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 
        spin_unlock_bh(&orig_node->bcast_seqno_lock);
 
+       /* check whether this has been sent by another originator before */
+       if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+               goto out;
+
        /* rebroadcast packet */
        add_bcast_packet_to_list(bat_priv, skb, 1);
 
+       /* don't hand the broadcast up if it is from an originator
+        * on the same backbone.
+        */
+       if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+               goto out;
+
        /* broadcast for me */
        interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
        ret = NET_RX_SUCCESS;
index 92ac100d83dad1557f3d2f0710dc2df7f05c4ed8..3d729cb17113af9aad13803cd136107b08d08b66 100644 (file)
@@ -25,7 +25,6 @@
 void slide_own_bcast_window(struct hard_iface *hard_iface);
 void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
                  struct neigh_node *neigh_node);
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
index a5590f4193f1f731cb9a562e952e72f02ed85d0e..efe0fbaadcd64ae1d9b6041242270f8349a844a8 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
 
 
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,439 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
        return 0;
 }
 
-static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
-{
-       if (atomic_dec_and_test(&softif_neigh->refcount))
-               kfree_rcu(softif_neigh, rcu);
-}
-
-static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
-{
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct softif_neigh *softif_neigh;
-       struct hlist_node *node, *node_tmp;
-       struct bat_priv *bat_priv;
-
-       softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
-       bat_priv = softif_neigh_vid->bat_priv;
-
-       spin_lock_bh(&bat_priv->softif_neigh_lock);
-       hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
-                                 &softif_neigh_vid->softif_neigh_list, list) {
-               hlist_del_rcu(&softif_neigh->list);
-               softif_neigh_free_ref(softif_neigh);
-       }
-       spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-       kfree(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
-{
-       if (atomic_dec_and_test(&softif_neigh_vid->refcount))
-               call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
-}
-
-static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
-                                                    short vid)
-{
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct hlist_node *node;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(softif_neigh_vid, node,
-                                &bat_priv->softif_neigh_vids, list) {
-               if (softif_neigh_vid->vid != vid)
-                       continue;
-
-               if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
-                       continue;
-
-               goto out;
-       }
-
-       softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
-       if (!softif_neigh_vid)
-               goto out;
-
-       softif_neigh_vid->vid = vid;
-       softif_neigh_vid->bat_priv = bat_priv;
-
-       /* initialize with 2 - caller decrements counter by one */
-       atomic_set(&softif_neigh_vid->refcount, 2);
-       INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
-       INIT_HLIST_NODE(&softif_neigh_vid->list);
-       spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
-       hlist_add_head_rcu(&softif_neigh_vid->list,
-                          &bat_priv->softif_neigh_vids);
-       spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-out:
-       rcu_read_unlock();
-       return softif_neigh_vid;
-}
-
-static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
-                                            const uint8_t *addr, short vid)
-{
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct softif_neigh *softif_neigh = NULL;
-       struct hlist_node *node;
-
-       softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-       if (!softif_neigh_vid)
-               goto out;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(softif_neigh, node,
-                                &softif_neigh_vid->softif_neigh_list,
-                                list) {
-               if (!compare_eth(softif_neigh->addr, addr))
-                       continue;
-
-               if (!atomic_inc_not_zero(&softif_neigh->refcount))
-                       continue;
-
-               softif_neigh->last_seen = jiffies;
-               goto unlock;
-       }
-
-       softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
-       if (!softif_neigh)
-               goto unlock;
-
-       memcpy(softif_neigh->addr, addr, ETH_ALEN);
-       softif_neigh->last_seen = jiffies;
-       /* initialize with 2 - caller decrements counter by one */
-       atomic_set(&softif_neigh->refcount, 2);
-
-       INIT_HLIST_NODE(&softif_neigh->list);
-       spin_lock_bh(&bat_priv->softif_neigh_lock);
-       hlist_add_head_rcu(&softif_neigh->list,
-                          &softif_neigh_vid->softif_neigh_list);
-       spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-unlock:
-       rcu_read_unlock();
-out:
-       if (softif_neigh_vid)
-               softif_neigh_vid_free_ref(softif_neigh_vid);
-       return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_get_selected(
-                               struct softif_neigh_vid *softif_neigh_vid)
-{
-       struct softif_neigh *softif_neigh;
-
-       rcu_read_lock();
-       softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
-       if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
-               softif_neigh = NULL;
-
-       rcu_read_unlock();
-       return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_vid_get_selected(
-                                               struct bat_priv *bat_priv,
-                                               short vid)
-{
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct softif_neigh *softif_neigh = NULL;
-
-       softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-       if (!softif_neigh_vid)
-               goto out;
-
-       softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-out:
-       if (softif_neigh_vid)
-               softif_neigh_vid_free_ref(softif_neigh_vid);
-       return softif_neigh;
-}
-
-static void softif_neigh_vid_select(struct bat_priv *bat_priv,
-                                   struct softif_neigh *new_neigh,
-                                   short vid)
-{
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct softif_neigh *curr_neigh;
-
-       softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-       if (!softif_neigh_vid)
-               goto out;
-
-       spin_lock_bh(&bat_priv->softif_neigh_lock);
-
-       if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
-               new_neigh = NULL;
-
-       curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
-                                              1);
-       rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
-
-       if ((curr_neigh) && (!new_neigh))
-               bat_dbg(DBG_ROUTES, bat_priv,
-                       "Removing mesh exit point on vid: %d (prev: %pM).\n",
-                       vid, curr_neigh->addr);
-       else if ((curr_neigh) && (new_neigh))
-               bat_dbg(DBG_ROUTES, bat_priv,
-                       "Changing mesh exit point on vid: %d from %pM to %pM.\n",
-                       vid, curr_neigh->addr, new_neigh->addr);
-       else if ((!curr_neigh) && (new_neigh))
-               bat_dbg(DBG_ROUTES, bat_priv,
-                       "Setting mesh exit point on vid: %d to %pM.\n",
-                       vid, new_neigh->addr);
-
-       if (curr_neigh)
-               softif_neigh_free_ref(curr_neigh);
-
-       spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-out:
-       if (softif_neigh_vid)
-               softif_neigh_vid_free_ref(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
-                                     struct softif_neigh_vid *softif_neigh_vid)
-{
-       struct softif_neigh *curr_neigh;
-       struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
-       struct hard_iface *primary_if = NULL;
-       struct hlist_node *node;
-
-       primary_if = primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       /* find new softif_neigh immediately to avoid temporary loops */
-       rcu_read_lock();
-       curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
-       hlist_for_each_entry_rcu(softif_neigh_tmp, node,
-                                &softif_neigh_vid->softif_neigh_list,
-                                list) {
-               if (softif_neigh_tmp == curr_neigh)
-                       continue;
-
-               /* we got a neighbor but its mac is 'bigger' than ours  */
-               if (memcmp(primary_if->net_dev->dev_addr,
-                          softif_neigh_tmp->addr, ETH_ALEN) < 0)
-                       continue;
-
-               if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
-                       continue;
-
-               softif_neigh = softif_neigh_tmp;
-               goto unlock;
-       }
-
-unlock:
-       rcu_read_unlock();
-out:
-       softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
-
-       if (primary_if)
-               hardif_free_ref(primary_if);
-       if (softif_neigh)
-               softif_neigh_free_ref(softif_neigh);
-}
-
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
-{
-       struct net_device *net_dev = (struct net_device *)seq->private;
-       struct bat_priv *bat_priv = netdev_priv(net_dev);
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct softif_neigh *softif_neigh;
-       struct hard_iface *primary_if;
-       struct hlist_node *node, *node_tmp;
-       struct softif_neigh *curr_softif_neigh;
-       int ret = 0, last_seen_secs, last_seen_msecs;
-
-       primary_if = primary_if_get_selected(bat_priv);
-       if (!primary_if) {
-               ret = seq_printf(seq,
-                                "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-                                net_dev->name);
-               goto out;
-       }
-
-       if (primary_if->if_status != IF_ACTIVE) {
-               ret = seq_printf(seq,
-                                "BATMAN mesh %s disabled - primary interface not active\n",
-                                net_dev->name);
-               goto out;
-       }
-
-       seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(softif_neigh_vid, node,
-                                &bat_priv->softif_neigh_vids, list) {
-               seq_printf(seq, "     %-15s %s on vid: %d\n",
-                          "Originator", "last-seen", softif_neigh_vid->vid);
-
-               curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-
-               hlist_for_each_entry_rcu(softif_neigh, node_tmp,
-                                        &softif_neigh_vid->softif_neigh_list,
-                                        list) {
-                       last_seen_secs = jiffies_to_msecs(jiffies -
-                                               softif_neigh->last_seen) / 1000;
-                       last_seen_msecs = jiffies_to_msecs(jiffies -
-                                               softif_neigh->last_seen) % 1000;
-                       seq_printf(seq, "%s %pM  %3i.%03is\n",
-                                  curr_softif_neigh == softif_neigh
-                                  ? "=>" : "  ", softif_neigh->addr,
-                                  last_seen_secs, last_seen_msecs);
-               }
-
-               if (curr_softif_neigh)
-                       softif_neigh_free_ref(curr_softif_neigh);
-
-               seq_printf(seq, "\n");
-       }
-       rcu_read_unlock();
-
-out:
-       if (primary_if)
-               hardif_free_ref(primary_if);
-       return ret;
-}
-
-void softif_neigh_purge(struct bat_priv *bat_priv)
-{
-       struct softif_neigh *softif_neigh, *curr_softif_neigh;
-       struct softif_neigh_vid *softif_neigh_vid;
-       struct hlist_node *node, *node_tmp, *node_tmp2;
-       int do_deselect;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(softif_neigh_vid, node,
-                                &bat_priv->softif_neigh_vids, list) {
-               if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
-                       continue;
-
-               curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-               do_deselect = 0;
-
-               spin_lock_bh(&bat_priv->softif_neigh_lock);
-               hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
-                                         &softif_neigh_vid->softif_neigh_list,
-                                         list) {
-                       if ((!has_timed_out(softif_neigh->last_seen,
-                                           SOFTIF_NEIGH_TIMEOUT)) &&
-                           (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
-                               continue;
-
-                       if (curr_softif_neigh == softif_neigh) {
-                               bat_dbg(DBG_ROUTES, bat_priv,
-                                       "Current mesh exit point on vid: %d '%pM' vanished.\n",
-                                       softif_neigh_vid->vid,
-                                       softif_neigh->addr);
-                               do_deselect = 1;
-                       }
-
-                       hlist_del_rcu(&softif_neigh->list);
-                       softif_neigh_free_ref(softif_neigh);
-               }
-               spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-               /* soft_neigh_vid_deselect() needs to acquire the
-                * softif_neigh_lock */
-               if (do_deselect)
-                       softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
-
-               if (curr_softif_neigh)
-                       softif_neigh_free_ref(curr_softif_neigh);
-
-               softif_neigh_vid_free_ref(softif_neigh_vid);
-       }
-       rcu_read_unlock();
-
-       spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
-       hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
-                                 &bat_priv->softif_neigh_vids, list) {
-               if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
-                       continue;
-
-               hlist_del_rcu(&softif_neigh_vid->list);
-               softif_neigh_vid_free_ref(softif_neigh_vid);
-       }
-       spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-}
-
-static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
-                              short vid)
-{
-       struct bat_priv *bat_priv = netdev_priv(dev);
-       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-       struct batman_ogm_packet *batman_ogm_packet;
-       struct softif_neigh *softif_neigh = NULL;
-       struct hard_iface *primary_if = NULL;
-       struct softif_neigh *curr_softif_neigh = NULL;
-
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
-               batman_ogm_packet = (struct batman_ogm_packet *)
-                                       (skb->data + ETH_HLEN + VLAN_HLEN);
-       else
-               batman_ogm_packet = (struct batman_ogm_packet *)
-                                                       (skb->data + ETH_HLEN);
-
-       if (batman_ogm_packet->header.version != COMPAT_VERSION)
-               goto out;
-
-       if (batman_ogm_packet->header.packet_type != BAT_OGM)
-               goto out;
-
-       if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
-               goto out;
-
-       if (is_my_mac(batman_ogm_packet->orig))
-               goto out;
-
-       softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
-       if (!softif_neigh)
-               goto out;
-
-       curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-       if (curr_softif_neigh == softif_neigh)
-               goto out;
-
-       primary_if = primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       /* we got a neighbor but its mac is 'bigger' than ours  */
-       if (memcmp(primary_if->net_dev->dev_addr,
-                  softif_neigh->addr, ETH_ALEN) < 0)
-               goto out;
-
-       /* close own batX device and use softif_neigh as exit node */
-       if (!curr_softif_neigh) {
-               softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-               goto out;
-       }
-
-       /* switch to new 'smallest neighbor' */
-       if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
-               softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-
-out:
-       kfree_skb(skb);
-       if (softif_neigh)
-               softif_neigh_free_ref(softif_neigh);
-       if (curr_softif_neigh)
-               softif_neigh_free_ref(curr_softif_neigh);
-       if (primary_if)
-               hardif_free_ref(primary_if);
-       return;
-}
-
 static int interface_open(struct net_device *dev)
 {
        netif_start_queue(dev);
@@ -562,10 +130,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        struct hard_iface *primary_if = NULL;
        struct bcast_packet *bcast_packet;
        struct vlan_ethhdr *vhdr;
-       struct softif_neigh *curr_softif_neigh = NULL;
+       static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
+                                                  0x00};
        unsigned int header_len = 0;
        int data_len = skb->len, ret;
-       short vid = -1;
+       short vid __maybe_unused = -1;
        bool do_bcast = false;
 
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
@@ -583,21 +152,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 
                /* fall through */
        case ETH_P_BATMAN:
-               softif_batman_recv(skb, soft_iface, vid);
-               goto end;
+               goto dropped;
        }
 
-       /**
-        * if we have a another chosen mesh exit node in range
-        * it will transport the packets to the mesh
-        */
-       curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-       if (curr_softif_neigh)
+       if (bla_tx(bat_priv, skb, vid))
                goto dropped;
 
        /* Register the client MAC in the transtable */
        tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
+       /* don't accept STP packets. STP does not help in meshes;
+        * better use the bridge loop avoidance ...
+        */
+       if (compare_eth(ethhdr->h_dest, stp_addr))
+               goto dropped;
+
        if (is_multicast_ether_addr(ethhdr->h_dest)) {
                do_bcast = true;
 
@@ -675,8 +244,6 @@ dropped:
 dropped_freed:
        bat_priv->stats.tx_dropped++;
 end:
-       if (curr_softif_neigh)
-               softif_neigh_free_ref(curr_softif_neigh);
        if (primary_if)
                hardif_free_ref(primary_if);
        return NETDEV_TX_OK;
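The STP check added to interface_tx() above drops frames sent to the 802.1D bridge group address 01:80:C2:00:00:00 before they enter the mesh. A tiny sketch of that comparison; is_stp_dest() is an illustrative helper, not part of the kernel code:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    static const unsigned char stp_addr[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x00 };

    static int is_stp_dest(const unsigned char *h_dest)
    {
            /* byte-wise compare against the STP multicast address */
            return memcmp(h_dest, stp_addr, ETH_ALEN) == 0;
    }

    int main(void)
    {
            unsigned char dest[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x00 };

            printf("drop: %d\n", is_stp_dest(dest));    /* 1 -> frame is dropped */
            return 0;
    }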
@@ -687,12 +254,9 @@ void interface_rx(struct net_device *soft_iface,
                  int hdr_size)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
-       struct unicast_packet *unicast_packet;
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
-       struct softif_neigh *curr_softif_neigh = NULL;
-       short vid = -1;
-       int ret;
+       short vid __maybe_unused = -1;
 
        /* check if enough space is available for pulling, and pull */
        if (!pskb_may_pull(skb, hdr_size))
@@ -716,30 +280,6 @@ void interface_rx(struct net_device *soft_iface,
                goto dropped;
        }
 
-       /**
-        * if we have a another chosen mesh exit node in range
-        * it will transport the packets to the non-mesh network
-        */
-       curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-       if (curr_softif_neigh) {
-               skb_push(skb, hdr_size);
-               unicast_packet = (struct unicast_packet *)skb->data;
-
-               if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
-                   (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
-                       goto dropped;
-
-               skb_reset_mac_header(skb);
-
-               memcpy(unicast_packet->dest,
-                      curr_softif_neigh->addr, ETH_ALEN);
-               ret = route_unicast_packet(skb, recv_if);
-               if (ret == NET_RX_DROP)
-                       goto dropped;
-
-               goto out;
-       }
-
        /* skb->dev & skb->pkt_type are set here */
        if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                goto dropped;
@@ -759,14 +299,18 @@ void interface_rx(struct net_device *soft_iface,
        if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
                goto dropped;
 
+       /* Let the bridge loop avoidance check the packet. If it will
+        * not handle it, we can safely push it up.
+        */
+       if (bla_rx(bat_priv, skb, vid))
+               goto out;
+
        netif_rx(skb);
        goto out;
 
 dropped:
        kfree_skb(skb);
 out:
-       if (curr_softif_neigh)
-               softif_neigh_free_ref(curr_softif_neigh);
        return;
 }
 
@@ -828,13 +372,14 @@ struct net_device *softif_create(const char *name)
 
        atomic_set(&bat_priv->aggregated_ogms, 1);
        atomic_set(&bat_priv->bonding, 0);
+       atomic_set(&bat_priv->bridge_loop_avoidance, 0);
        atomic_set(&bat_priv->ap_isolation, 0);
        atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
        atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
        atomic_set(&bat_priv->gw_sel_class, 20);
        atomic_set(&bat_priv->gw_bandwidth, 41);
        atomic_set(&bat_priv->orig_interval, 1000);
-       atomic_set(&bat_priv->hop_penalty, 10);
+       atomic_set(&bat_priv->hop_penalty, 30);
        atomic_set(&bat_priv->log_level, 0);
        atomic_set(&bat_priv->fragmentation, 1);
        atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
@@ -845,6 +390,7 @@ struct net_device *softif_create(const char *name)
        atomic_set(&bat_priv->ttvn, 0);
        atomic_set(&bat_priv->tt_local_changes, 0);
        atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+       atomic_set(&bat_priv->bla_num_requests, 0);
 
        bat_priv->tt_buff = NULL;
        bat_priv->tt_buff_len = 0;
index 756eab5b8dd481766019b5777f4c731238485872..0203006738847b0795663848341360df6d507eb1 100644 (file)
@@ -23,8 +23,6 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
-void softif_neigh_purge(struct bat_priv *bat_priv);
 void interface_rx(struct net_device *soft_iface,
                  struct sk_buff *skb, struct hard_iface *recv_if,
                  int hdr_size);
index 1f86921278404ce2f17597a2c9a53fe602021d3c..e16a3690bdb24d606326ec0735daa9d754650e1e 100644 (file)
 #include "hash.h"
 #include "originator.h"
 #include "routing.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/crc16.h>
 
-static void _tt_global_del(struct bat_priv *bat_priv,
-                          struct tt_global_entry *tt_global_entry,
-                          const char *message);
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+                         struct orig_node *orig_node);
 static void tt_purge(struct work_struct *work);
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
 
 /* returns 1 if they are the same mac addr */
 static int compare_tt(const struct hlist_node *node, const void *data2)
@@ -123,17 +124,31 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
        tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
                                       common);
 
-       if (tt_global_entry->orig_node)
-               orig_node_free_ref(tt_global_entry->orig_node);
-
        kfree(tt_global_entry);
 }
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-       if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+       if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
+               tt_global_del_orig_list(tt_global_entry);
                call_rcu(&tt_global_entry->common.rcu,
                         tt_global_entry_free_rcu);
+       }
+}
+
+static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+       struct tt_orig_list_entry *orig_entry;
+
+       orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
+       atomic_dec(&orig_entry->orig_node->tt_size);
+       orig_node_free_ref(orig_entry->orig_node);
+       kfree(orig_entry);
+}
+
+static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+{
+       call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
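tt_orig_list_entry_free_ref() and tt_global_entry_free_ref() above both follow the usual put-and-free-on-last-reference shape (with the actual free deferred through call_rcu). A compact userspace sketch of that shape using C11 atomics; struct entry and entry_put() are illustrative, and the RCU deferral is omitted:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            atomic_int refcount;
            int payload;
    };

    static void entry_put(struct entry *e)
    {
            /* fetch_sub returns the previous value: the last holder frees */
            if (atomic_fetch_sub(&e->refcount, 1) == 1) {
                    printf("freeing entry %d\n", e->payload);
                    free(e);
            }
    }

    int main(void)
    {
            struct entry *e = malloc(sizeof(*e));

            atomic_init(&e->refcount, 2);   /* two holders */
            e->payload = 42;
            entry_put(e);                   /* one holder left, nothing freed */
            entry_put(e);                   /* last put frees the entry */
            return 0;
    }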
@@ -182,6 +197,9 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct tt_orig_list_entry *orig_entry;
        int hash_added;
 
        tt_local_entry = tt_local_hash_find(bat_priv, addr);
@@ -232,14 +250,21 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        /* Check whether it is a roaming! */
        if (tt_global_entry) {
-               /* This node is probably going to update its tt table */
-               tt_global_entry->orig_node->tt_poss_change = true;
-               /* The global entry has to be marked as ROAMING and has to be
-                * kept for consistency purpose */
+               /* These nodes are probably going to update their tt tables */
+               head = &tt_global_entry->orig_list;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+                       orig_entry->orig_node->tt_poss_change = true;
+
+                       send_roam_adv(bat_priv, tt_global_entry->common.addr,
+                                     orig_entry->orig_node);
+               }
+               rcu_read_unlock();
+               /* The global entry has to be marked as ROAMING and
+                * has to be kept for consistency purpose
+                */
                tt_global_entry->common.flags |= TT_CLIENT_ROAM;
                tt_global_entry->roam_at = jiffies;
-               send_roam_adv(bat_priv, tt_global_entry->common.addr,
-                             tt_global_entry->orig_node);
        }
 out:
        if (tt_local_entry)
@@ -490,33 +515,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt_changes_list_lock);
 }
 
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
+                                    const struct orig_node *orig_node)
+{
+       struct tt_orig_list_entry *tmp_orig_entry;
+       const struct hlist_head *head;
+       struct hlist_node *node;
+       bool found = false;
+
+       rcu_read_lock();
+       head = &entry->orig_list;
+       hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+               if (tmp_orig_entry->orig_node == orig_node) {
+                       found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       return found;
+}
+
+static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
+                                    struct orig_node *orig_node,
+                                    int ttvn)
+{
+       struct tt_orig_list_entry *orig_entry;
+
+       orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+       if (!orig_entry)
+               return;
+
+       INIT_HLIST_NODE(&orig_entry->list);
+       atomic_inc(&orig_node->refcount);
+       atomic_inc(&orig_node->tt_size);
+       orig_entry->orig_node = orig_node;
+       orig_entry->ttvn = ttvn;
+
+       spin_lock_bh(&tt_global_entry->list_lock);
+       hlist_add_head_rcu(&orig_entry->list,
+                          &tt_global_entry->orig_list);
+       spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
 /* caller must hold orig_node refcount */
 int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
                  bool wifi)
 {
-       struct tt_global_entry *tt_global_entry;
-       struct orig_node *orig_node_tmp;
+       struct tt_global_entry *tt_global_entry = NULL;
        int ret = 0;
        int hash_added;
 
        tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
        if (!tt_global_entry) {
-               tt_global_entry =
-                       kmalloc(sizeof(*tt_global_entry),
-                               GFP_ATOMIC);
+               tt_global_entry = kzalloc(sizeof(*tt_global_entry),
+                                         GFP_ATOMIC);
                if (!tt_global_entry)
                        goto out;
 
                memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+
                tt_global_entry->common.flags = NO_FLAGS;
-               atomic_set(&tt_global_entry->common.refcount, 2);
-               /* Assign the new orig_node */
-               atomic_inc(&orig_node->refcount);
-               tt_global_entry->orig_node = orig_node;
-               tt_global_entry->ttvn = ttvn;
                tt_global_entry->roam_at = 0;
+               atomic_set(&tt_global_entry->common.refcount, 2);
+
+               INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+               spin_lock_init(&tt_global_entry->list_lock);
 
                hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
                                 choose_orig, &tt_global_entry->common,
@@ -527,19 +595,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                        tt_global_entry_free_ref(tt_global_entry);
                        goto out_remove;
                }
-               atomic_inc(&orig_node->tt_size);
+
+               tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
        } else {
-               if (tt_global_entry->orig_node != orig_node) {
-                       atomic_dec(&tt_global_entry->orig_node->tt_size);
-                       orig_node_tmp = tt_global_entry->orig_node;
-                       atomic_inc(&orig_node->refcount);
-                       tt_global_entry->orig_node = orig_node;
-                       orig_node_free_ref(orig_node_tmp);
-                       atomic_inc(&orig_node->tt_size);
+               /* there is already a global entry, use this one. */
+
+               /* If the TT_CLIENT_ROAM flag is set, there is only one
+                * originator left in the list and we previously received a
+                * delete + roaming change for this originator.
+                *
+                * We should first delete the old originator before adding the
+                * new one.
+                */
+               if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
+                       tt_global_del_orig_list(tt_global_entry);
+                       tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+                       tt_global_entry->roam_at = 0;
                }
-               tt_global_entry->common.flags = NO_FLAGS;
-               tt_global_entry->ttvn = ttvn;
-               tt_global_entry->roam_at = 0;
+
+               if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
+                       tt_global_add_orig_entry(tt_global_entry, orig_node,
+                                                ttvn);
        }
 
        if (wifi)
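tt_global_entry_has_orig() and tt_global_add_orig_entry() above replace the single orig_node pointer with a per-client list of announcing originators, and a new entry is only added when the originator is not in the list yet. A plain-C sketch of that check-then-add idea; the list here is a simple singly linked list without the kernel's RCU protection and locking:

    #include <stdio.h>
    #include <stdlib.h>

    struct orig_entry {
            int orig_id;                    /* stands in for the orig_node pointer */
            struct orig_entry *next;
    };

    static int has_orig(const struct orig_entry *head, int orig_id)
    {
            for (; head; head = head->next)
                    if (head->orig_id == orig_id)
                            return 1;
            return 0;
    }

    static struct orig_entry *add_orig(struct orig_entry *head, int orig_id)
    {
            struct orig_entry *e;

            if (has_orig(head, orig_id))
                    return head;            /* already announced by this originator */
            e = malloc(sizeof(*e));
            e->orig_id = orig_id;
            e->next = head;
            return e;
    }

    int main(void)
    {
            struct orig_entry *list = NULL;

            list = add_orig(list, 1);
            list = add_orig(list, 1);       /* duplicate, ignored */
            list = add_orig(list, 2);
            printf("has 1: %d, has 3: %d\n", has_orig(list, 1), has_orig(list, 3));
            return 0;
    }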
@@ -560,6 +636,34 @@ out:
        return ret;
 }
 
+/* print all orig nodes that announce the address for this global entry.
+ * it is assumed that the caller holds rcu_read_lock();
+ */
+static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
+                                 struct seq_file *seq)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct tt_orig_list_entry *orig_entry;
+       struct tt_common_entry *tt_common_entry;
+       uint16_t flags;
+       uint8_t last_ttvn;
+
+       tt_common_entry = &tt_global_entry->common;
+
+       head = &tt_global_entry->orig_list;
+
+       hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+               flags = tt_common_entry->flags;
+               last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
+               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+                          tt_global_entry->common.addr, orig_entry->ttvn,
+                          orig_entry->orig_node->orig, last_ttvn,
+                          (flags & TT_CLIENT_ROAM ? 'R' : '.'),
+                          (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+       }
+}
+
 int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -603,18 +707,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
-                       seq_printf(seq,
-                                  " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
-                                  tt_global_entry->common.addr,
-                                  tt_global_entry->ttvn,
-                                  tt_global_entry->orig_node->orig,
-                                  (uint8_t) atomic_read(
-                                               &tt_global_entry->orig_node->
-                                               last_ttvn),
-                                  (tt_global_entry->common.flags &
-                                   TT_CLIENT_ROAM ? 'R' : '.'),
-                                  (tt_global_entry->common.flags &
-                                   TT_CLIENT_WIFI ? 'W' : '.'));
+                       tt_global_print_entry(tt_global_entry, seq);
                }
                rcu_read_unlock();
        }
@@ -624,59 +717,150 @@ out:
        return ret;
 }
 
-static void _tt_global_del(struct bat_priv *bat_priv,
-                          struct tt_global_entry *tt_global_entry,
-                          const char *message)
+/* deletes the orig list of a tt_global_entry */
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
 {
-       if (!tt_global_entry)
-               goto out;
+       struct hlist_head *head;
+       struct hlist_node *node, *safe;
+       struct tt_orig_list_entry *orig_entry;
 
-       bat_dbg(DBG_TT, bat_priv,
-               "Deleting global tt entry %pM (via %pM): %s\n",
-               tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
-               message);
+       spin_lock_bh(&tt_global_entry->list_lock);
+       head = &tt_global_entry->orig_list;
+       hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+               hlist_del_rcu(node);
+               tt_orig_list_entry_free_ref(orig_entry);
+       }
+       spin_unlock_bh(&tt_global_entry->list_lock);
 
-       atomic_dec(&tt_global_entry->orig_node->tt_size);
+}
+
+static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
+                                    struct tt_global_entry *tt_global_entry,
+                                    struct orig_node *orig_node,
+                                    const char *message)
+{
+       struct hlist_head *head;
+       struct hlist_node *node, *safe;
+       struct tt_orig_list_entry *orig_entry;
+
+       spin_lock_bh(&tt_global_entry->list_lock);
+       head = &tt_global_entry->orig_list;
+       hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+               if (orig_entry->orig_node == orig_node) {
+                       bat_dbg(DBG_TT, bat_priv,
+                               "Deleting %pM from global tt entry %pM: %s\n",
+                               orig_node->orig, tt_global_entry->common.addr,
+                               message);
+                       hlist_del_rcu(node);
+                       tt_orig_list_entry_free_ref(orig_entry);
+               }
+       }
+       spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
+static void tt_global_del_struct(struct bat_priv *bat_priv,
+                                struct tt_global_entry *tt_global_entry,
+                                const char *message)
+{
+       bat_dbg(DBG_TT, bat_priv,
+               "Deleting global tt entry %pM: %s\n",
+               tt_global_entry->common.addr, message);
 
        hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
                    tt_global_entry->common.addr);
-out:
-       if (tt_global_entry)
-               tt_global_entry_free_ref(tt_global_entry);
+       tt_global_entry_free_ref(tt_global_entry);
+
 }
 
-void tt_global_del(struct bat_priv *bat_priv,
-                  struct orig_node *orig_node, const unsigned char *addr,
-                  const char *message, bool roaming)
+/* If the client is to be deleted, we check if it is the last originator entry
+ * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
+ * otherwise we simply remove the originator scheduled for deletion.
+ */
+static void tt_global_del_roaming(struct bat_priv *bat_priv,
+                                 struct tt_global_entry *tt_global_entry,
+                                 struct orig_node *orig_node,
+                                 const char *message)
+{
+       bool last_entry = true;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct tt_orig_list_entry *orig_entry;
+
+       /* no local entry exists, case 1:
+        * Check if this is the last one or if other entries exist.
+        */
+
+       rcu_read_lock();
+       head = &tt_global_entry->orig_list;
+       hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+               if (orig_entry->orig_node != orig_node) {
+                       last_entry = false;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       if (last_entry) {
+               /* it's the last one, mark it for roaming. */
+               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+               tt_global_entry->roam_at = jiffies;
+       } else
+               /* there is another entry, we can simply delete this
+                * one and can still use the other one.
+                */
+               tt_global_del_orig_entry(bat_priv, tt_global_entry,
+                                        orig_node, message);
+}
+
+static void tt_global_del(struct bat_priv *bat_priv,
+                         struct orig_node *orig_node,
+                         const unsigned char *addr,
+                         const char *message, bool roaming)
 {
        struct tt_global_entry *tt_global_entry = NULL;
        struct tt_local_entry *tt_local_entry = NULL;
 
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
-       if (!tt_global_entry || tt_global_entry->orig_node != orig_node)
+       if (!tt_global_entry)
                goto out;
 
-       if (!roaming)
-               goto out_del;
+       if (!roaming) {
+               tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
+                                        message);
+
+               if (hlist_empty(&tt_global_entry->orig_list))
+                       tt_global_del_struct(bat_priv, tt_global_entry,
+                                            message);
+
+               goto out;
+       }
 
        /* if we are deleting a global entry due to a roam
         * event, there are two possibilities:
-        * 1) the client roamed from node A to node B => we mark
+        * 1) the client roamed from node A to node B => if there
+        *    is only one originator left for this client, we mark
         *    it with TT_CLIENT_ROAM, we start a timer and we
         *    wait for node B to claim it. In case of timeout
         *    the entry is purged.
+        *
+        *    If there are other originators left, we directly delete
+        *    the originator.
         * 2) the client roamed to us => we can directly delete
         *    the global entry, since it is useless now. */
+
        tt_local_entry = tt_local_hash_find(bat_priv,
                                            tt_global_entry->common.addr);
-       if (!tt_local_entry) {
-               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
-               tt_global_entry->roam_at = jiffies;
-               goto out;
-       }
+       if (tt_local_entry) {
+               /* local entry exists, case 2: client roamed to us. */
+               tt_global_del_orig_list(tt_global_entry);
+               tt_global_del_struct(bat_priv, tt_global_entry, message);
+       } else
+               /* no local entry exists, case 1: check for roaming */
+               tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
+                                     message);
 
-out_del:
-       _tt_global_del(bat_priv, tt_global_entry, message);
 
 out:
        if (tt_global_entry)
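tt_global_del() above separates the two roaming cases spelled out in the comment: if a local entry exists the client roamed to us and the whole global entry can go, otherwise only the last remaining originator is marked TT_CLIENT_ROAM and left to time out, while a non-last originator is simply removed. A small sketch of that decision; the enum names and roam_decision() are illustrative only:

    #include <stdio.h>

    enum roam_action {
            DELETE_WHOLE_ENTRY,     /* case 2: client roamed to us */
            MARK_LAST_AS_ROAMING,   /* case 1: keep it until the new node claims it */
            DELETE_ONE_ORIG         /* other originators still announce the client */
    };

    static enum roam_action roam_decision(int have_local_entry, int is_last_originator)
    {
            if (have_local_entry)
                    return DELETE_WHOLE_ENTRY;
            if (is_last_originator)
                    return MARK_LAST_AS_ROAMING;
            return DELETE_ONE_ORIG;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   roam_decision(1, 0),     /* 0: delete the whole entry */
                   roam_decision(0, 1),     /* 1: mark as roaming */
                   roam_decision(0, 0));    /* 2: drop just this originator */
            return 0;
    }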
@@ -709,11 +893,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
-                       if (tt_global_entry->orig_node == orig_node) {
+
+                       tt_global_del_orig_entry(bat_priv, tt_global_entry,
+                                                orig_node, message);
+
+                       if (hlist_empty(&tt_global_entry->orig_list)) {
                                bat_dbg(DBG_TT, bat_priv,
-                                       "Deleting global tt entry %pM (via %pM): %s\n",
+                                       "Deleting global tt entry %pM: %s\n",
                                        tt_global_entry->common.addr,
-                                       tt_global_entry->orig_node->orig,
                                        message);
                                hlist_del_rcu(node);
                                tt_global_entry_free_ref(tt_global_entry);
@@ -754,7 +941,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
                        bat_dbg(DBG_TT, bat_priv,
                                "Deleting global tt entry (%pM): Roaming timeout\n",
                                tt_global_entry->common.addr);
-                       atomic_dec(&tt_global_entry->orig_node->tt_size);
+
                        hlist_del_rcu(node);
                        tt_global_entry_free_ref(tt_global_entry);
                }
@@ -817,6 +1004,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
        struct orig_node *orig_node = NULL;
+       struct neigh_node *router = NULL;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct tt_orig_list_entry *orig_entry;
+       int best_tq;
 
        if (src && atomic_read(&bat_priv->ap_isolation)) {
                tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -833,11 +1025,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
        if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
                goto out;
 
-       if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
-               goto out;
+       best_tq = 0;
 
-       orig_node = tt_global_entry->orig_node;
+       rcu_read_lock();
+       head = &tt_global_entry->orig_list;
+       hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+               router = orig_node_get_router(orig_entry->orig_node);
+               if (!router)
+                       continue;
 
+               if (router->tq_avg > best_tq) {
+                       orig_node = orig_entry->orig_node;
+                       best_tq = router->tq_avg;
+               }
+               neigh_node_free_ref(router);
+       }
+       /* found anything? */
+       if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
+               orig_node = NULL;
+       rcu_read_unlock();
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
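transtable_search() now walks every originator that announces the client and keeps the one whose router reports the highest TQ. The selection itself is a plain best-so-far scan; a tiny sketch, where struct candidate and best_candidate() are illustrative stand-ins for the orig_list walk:

    #include <stdio.h>

    struct candidate {
            int orig_id;
            int tq_avg;     /* link quality reported by the router, 0..255 */
    };

    /* returns the index of the best candidate, or -1 when none has tq > 0 */
    static int best_candidate(const struct candidate *c, int n)
    {
            int i, best = -1, best_tq = 0;

            for (i = 0; i < n; i++)
                    if (c[i].tq_avg > best_tq) {
                            best = i;
                            best_tq = c[i].tq_avg;
                    }
            return best;
    }

    int main(void)
    {
            struct candidate c[] = { { 1, 120 }, { 2, 230 }, { 3, 200 } };

            printf("best originator: %d\n", c[best_candidate(c, 3)].orig_id);
            return 0;
    }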
@@ -848,7 +1054,8 @@ out:
 }
 
 /* Calculates the checksum of the local table of a given orig_node */
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
+static uint16_t tt_global_crc(struct bat_priv *bat_priv,
+                             struct orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
@@ -868,20 +1075,26 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
                        tt_global_entry = container_of(tt_common_entry,
                                                       struct tt_global_entry,
                                                       common);
-                       if (compare_eth(tt_global_entry->orig_node,
-                                       orig_node)) {
-                               /* Roaming clients are in the global table for
-                                * consistency only. They don't have to be
-                                * taken into account while computing the
-                                * global crc */
-                               if (tt_common_entry->flags & TT_CLIENT_ROAM)
-                                       continue;
-                               total_one = 0;
-                               for (j = 0; j < ETH_ALEN; j++)
-                                       total_one = crc16_byte(total_one,
-                                               tt_common_entry->addr[j]);
-                               total ^= total_one;
-                       }
+                       /* Roaming clients are in the global table for
+                        * consistency only. They don't have to be
+                        * taken into account while computing the
+                        * global crc
+                        */
+                       if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+                               continue;
+
+                       /* find out if this global entry is announced by this
+                        * originator
+                        */
+                       if (!tt_global_entry_has_orig(tt_global_entry,
+                                                     orig_node))
+                               continue;
+
+                       total_one = 0;
+                       for (j = 0; j < ETH_ALEN; j++)
+                               total_one = crc16_byte(total_one,
+                                       tt_global_entry->common.addr[j]);
+                       total ^= total_one;
                }
                rcu_read_unlock();
        }
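The global CRC above is an XOR of one CRC16 per announced client MAC, so individual entries (roaming clients, clients of other originators) can be skipped without any ordering concerns. A userspace sketch of that fold, using a bit-by-bit routine as a stand-in for the kernel's crc16_byte() (reflected CRC-16, polynomial 0xA001):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    /* bit-by-bit CRC-16 (poly 0xA001), a stand-in for the kernel's crc16_byte() */
    static uint16_t crc16_byte(uint16_t crc, uint8_t data)
    {
            int i;

            crc ^= data;
            for (i = 0; i < 8; i++)
                    crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
            return crc;
    }

    int main(void)
    {
            const uint8_t clients[2][ETH_ALEN] = {
                    { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
                    { 0x02, 0x66, 0x77, 0x88, 0x99, 0xaa },
            };
            uint16_t total = 0, total_one;
            int i, j;

            for (i = 0; i < 2; i++) {       /* one CRC per announced client */
                    total_one = 0;
                    for (j = 0; j < ETH_ALEN; j++)
                            total_one = crc16_byte(total_one, clients[i][j]);
                    total ^= total_one;     /* XOR-fold: order independent */
            }
            printf("global crc: 0x%04x\n", total);
            return 0;
    }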
@@ -936,8 +1149,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt_req_list_lock);
 }
 
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
-                        const unsigned char *tt_buff, uint8_t tt_num_changes)
+static void tt_save_orig_buffer(struct bat_priv *bat_priv,
+                               struct orig_node *orig_node,
+                               const unsigned char *tt_buff,
+                               uint8_t tt_num_changes)
 {
        uint16_t tt_buff_len = tt_len(tt_num_changes);
 
@@ -1020,7 +1235,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
        tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
                                       common);
 
-       return (tt_global_entry->orig_node == orig_node);
+       return tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
 static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1401,10 +1616,15 @@ out:
 bool send_tt_response(struct bat_priv *bat_priv,
                      struct tt_query_packet *tt_request)
 {
-       if (is_my_mac(tt_request->dst))
+       if (is_my_mac(tt_request->dst)) {
+               /* don't answer backbone gws! */
+               if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+                       return true;
+
                return send_my_tt_response(bat_priv, tt_request);
-       else
+       } else {
                return send_other_tt_response(bat_priv, tt_request);
+       }
 }
 
 static void _tt_update_changes(struct bat_priv *bat_priv,
@@ -1508,6 +1728,10 @@ void handle_tt_response(struct bat_priv *bat_priv,
                tt_response->src, tt_response->ttvn, tt_response->tt_data,
                (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
 
+       /* we should have never asked a backbone gw */
+       if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+               goto out;
+
        orig_node = orig_hash_find(bat_priv, tt_response->src);
        if (!orig_node)
                goto out;
@@ -1627,8 +1851,8 @@ unlock:
        return ret;
 }
 
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-                  struct orig_node *orig_node)
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+                         struct orig_node *orig_node)
 {
        struct neigh_node *neigh_node = NULL;
        struct sk_buff *skb = NULL;
@@ -1796,6 +2020,8 @@ void tt_commit_changes(struct bat_priv *bat_priv)
 
        /* Increment the TTVN only once per OGM interval */
        atomic_inc(&bat_priv->ttvn);
+       bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
+               (uint8_t)atomic_read(&bat_priv->ttvn));
        bat_priv->tt_poss_change = false;
 }
 
@@ -1836,6 +2062,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
        uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
        bool full_table = true;
 
+       /* don't care about a backbone gateway's updates. */
+       if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+               return;
+
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes */
        if ((!orig_node->tt_initialised && ttvn == 1) ||
@@ -1873,6 +2103,7 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
        } else {
                /* if we missed more than one change or our tables are not
                 * in sync anymore -> request fresh tt data */
+
                if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
                    orig_node->tt_crc != tt_crc) {
 request_table:
index c753633b1da130d0e65eba2d120847647ea67457..bfebe26edd8ee678be1b759f0f2225d3e4345ec3 100644 (file)
@@ -39,23 +39,15 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
 int tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void tt_global_del_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node, const char *message);
-void tt_global_del(struct bat_priv *bat_priv,
-                  struct orig_node *orig_node, const unsigned char *addr,
-                  const char *message, bool roaming);
 struct orig_node *transtable_search(struct bat_priv *bat_priv,
                                    const uint8_t *src, const uint8_t *addr);
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
-                        const unsigned char *tt_buff, uint8_t tt_num_changes);
 uint16_t tt_local_crc(struct bat_priv *bat_priv);
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void tt_free(struct bat_priv *bat_priv);
 bool send_tt_response(struct bat_priv *bat_priv,
                      struct tt_query_packet *tt_request);
 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
 void handle_tt_response(struct bat_priv *bat_priv,
                        struct tt_query_packet *tt_response);
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-                  struct orig_node *orig_node);
 void tt_commit_changes(struct bat_priv *bat_priv);
 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
 void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
index 302efb523475a093a10569246a2ec9cf085ff2db..a5b1a6333deffdc854a706c91270411007864b40 100644 (file)
@@ -90,7 +90,7 @@ struct orig_node {
        bool tt_poss_change;
        uint32_t last_real_seqno;
        uint8_t last_ttl;
-       unsigned long bcast_bits[NUM_WORDS];
+       DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
        uint32_t last_bcast_seqno;
        struct hlist_head neigh_list;
        struct list_head frag_list;
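Switching bcast_bits and real_bits to DECLARE_BITMAP lets the array size be derived from the window size in bits instead of a hand-maintained NUM_WORDS constant. A userspace sketch of what the macro expands to; DIV_ROUND_UP, BITS_PER_LONG and DECLARE_BITMAP are re-defined locally here purely for illustration:

    #include <limits.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)          (((n) + (d) - 1) / (d))
    #define BITS_PER_LONG               (sizeof(unsigned long) * CHAR_BIT)
    #define DECLARE_BITMAP(name, bits)  unsigned long name[DIV_ROUND_UP(bits, BITS_PER_LONG)]

    #define TQ_LOCAL_WINDOW_SIZE 64

    int main(void)
    {
            DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);

            printf("%zu words, %zu bytes\n",
                   sizeof(bcast_bits) / sizeof(bcast_bits[0]), sizeof(bcast_bits));
            return 0;
    }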
@@ -132,7 +132,7 @@ struct neigh_node {
        uint8_t last_ttl;
        struct list_head bonding_list;
        unsigned long last_valid;
-       unsigned long real_bits[NUM_WORDS];
+       DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
        atomic_t refcount;
        struct rcu_head rcu;
        struct orig_node *orig_node;
@@ -140,6 +140,13 @@ struct neigh_node {
        spinlock_t tq_lock;     /* protects: tq_recv, tq_index */
 };
 
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct bcast_duplist_entry {
+       uint8_t orig[ETH_ALEN];
+       uint16_t crc;
+       unsigned long entrytime;
+};
+#endif
 
 struct bat_priv {
        atomic_t mesh_state;
@@ -148,6 +155,7 @@ struct bat_priv {
        atomic_t bonding;               /* boolean */
        atomic_t fragmentation;         /* boolean */
        atomic_t ap_isolation;          /* boolean */
+       atomic_t bridge_loop_avoidance; /* boolean */
        atomic_t vis_mode;              /* VIS_TYPE_* */
        atomic_t gw_mode;               /* GW_MODE_* */
        atomic_t gw_sel_class;          /* uint */
@@ -161,6 +169,7 @@ struct bat_priv {
        atomic_t ttvn; /* translation table version number */
        atomic_t tt_ogm_append_cnt;
        atomic_t tt_local_changes; /* changes registered in a OGM interval */
+       atomic_t bla_num_requests; /* number of bla requests in flight */
        /* The tt_poss_change flag is used to detect an ongoing roaming phase.
         * If true, then I received a Roaming_adv and I have to inspect every
         * packet directed to me to check whether I am still the true
@@ -174,15 +183,23 @@ struct bat_priv {
        struct hlist_head forw_bat_list;
        struct hlist_head forw_bcast_list;
        struct hlist_head gw_list;
-       struct hlist_head softif_neigh_vids;
        struct list_head tt_changes_list; /* tracks changes in a OGM int */
        struct list_head vis_send_list;
        struct hashtable_t *orig_hash;
        struct hashtable_t *tt_local_hash;
        struct hashtable_t *tt_global_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+       struct hashtable_t *claim_hash;
+       struct hashtable_t *backbone_hash;
+#endif
        struct list_head tt_req_list; /* list of pending tt_requests */
        struct list_head tt_roam_list;
        struct hashtable_t *vis_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+       struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+       int bcast_duplist_curr;
+       struct bla_claim_dst claim_dest;
+#endif
        spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
        spinlock_t forw_bcast_list_lock; /* protects  */
        spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -191,8 +208,6 @@ struct bat_priv {
        spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
        spinlock_t vis_hash_lock; /* protects vis_hash */
        spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-       spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
-       spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
        atomic_t num_local_tt;
        /* Checksum of the local table, recomputed before sending a new OGM */
        atomic_t tt_crc;
@@ -202,6 +217,7 @@ struct bat_priv {
        struct delayed_work tt_work;
        struct delayed_work orig_work;
        struct delayed_work vis_work;
+       struct delayed_work bla_work;
        struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
        atomic_t gw_reselect;
        struct hard_iface __rcu *primary_if;  /* rcu protected pointer */
@@ -239,10 +255,41 @@ struct tt_local_entry {
 
 struct tt_global_entry {
        struct tt_common_entry common;
+       struct hlist_head orig_list;
+       spinlock_t list_lock;   /* protects the list */
+       unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+};
+
+struct tt_orig_list_entry {
        struct orig_node *orig_node;
        uint8_t ttvn;
-       unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+       struct rcu_head rcu;
+       struct hlist_node list;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct backbone_gw {
+       uint8_t orig[ETH_ALEN];
+       short vid;              /* used VLAN ID */
+       struct hlist_node hash_entry;
+       struct bat_priv *bat_priv;
+       unsigned long lasttime; /* last time we heard of this backbone gw */
+       atomic_t request_sent;
+       atomic_t refcount;
+       struct rcu_head rcu;
+       uint16_t crc;           /* crc checksum over all claims */
+};
+
+struct claim {
+       uint8_t addr[ETH_ALEN];
+       short vid;
+       struct backbone_gw *backbone_gw;
+       unsigned long lasttime; /* last time we heard of claim (locals only) */
+       struct rcu_head rcu;
+       atomic_t refcount;
+       struct hlist_node hash_entry;
 };
+#endif
 
 struct tt_change_node {
        struct list_head list;
@@ -327,24 +374,6 @@ struct recvlist_node {
        uint8_t mac[ETH_ALEN];
 };
 
-struct softif_neigh_vid {
-       struct hlist_node list;
-       struct bat_priv *bat_priv;
-       short vid;
-       atomic_t refcount;
-       struct softif_neigh __rcu *softif_neigh;
-       struct rcu_head rcu;
-       struct hlist_head softif_neigh_list;
-};
-
-struct softif_neigh {
-       struct hlist_node list;
-       uint8_t addr[ETH_ALEN];
-       unsigned long last_seen;
-       atomic_t refcount;
-       struct rcu_head rcu;
-};
-
 struct bat_algo_ops {
        struct hlist_node list;
        char *name;
index 5ba0c844d508cbe549788e2219b4dd9ab1383149..80dbce4974ceafe0f7ed8360a87c84e8f034e86d 100644 (file)
@@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(fdb);
 
-       NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
-
+       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+               goto nla_put_failure;
        ci.ndm_used      = jiffies_to_clock_t(now - fdb->used);
        ci.ndm_confirmed = 0;
        ci.ndm_updated   = jiffies_to_clock_t(now - fdb->updated);
        ci.ndm_refcnt    = 0;
-       NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
-
+       if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
index a1daf8227ed11c1a1853a8fb5a246919e9deb5f5..346b368d86985c366648d7f2493b3b86804e2cd5 100644 (file)
@@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
        hdr->ifi_flags = dev_get_flags(dev);
        hdr->ifi_change = 0;
 
-       NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-       NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex);
-       NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-       NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate);
-
-       if (dev->addr_len)
-               NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
-       if (dev->ifindex != dev->iflink)
-               NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
-       if (event == RTM_NEWLINK)
-               NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
-
+       if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+           nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
+           nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+           nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
+           (dev->addr_len &&
+            nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+           (dev->ifindex != dev->iflink &&
+            nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+           (event == RTM_NEWLINK &&
+            nla_put_u8(skb, IFLA_PROTINFO, port->state)))
+               goto nla_put_failure;
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
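
The bridge hunks above, and most of the netlink hunks that follow, apply one mechanical conversion: the NLA_PUT*() macros, which hid a "goto nla_put_failure" inside themselves, are replaced by the plain nla_put*() helpers whose return values are checked explicitly, often OR-ed together so a single branch covers a whole attribute block. A minimal sketch of the resulting pattern, using hypothetical my_fill_info()/MYATTR_* names that are not part of this series:

#include <net/netlink.h>
#include <linux/etherdevice.h>

struct my_state {                       /* illustrative only */
        u32 index;
        const char *name;
        bool have_addr;
        u8 addr[ETH_ALEN];
};

enum { MYATTR_UNSPEC, MYATTR_INDEX, MYATTR_NAME, MYATTR_ADDR };

static int my_fill_info(struct sk_buff *skb, const struct my_state *st)
{
        /* Each nla_put*() returns non-zero when the skb runs out of
         * tailroom; OR-ing the calls lets one branch handle them all. */
        if (nla_put_u32(skb, MYATTR_INDEX, st->index) ||
            nla_put_string(skb, MYATTR_NAME, st->name) ||
            (st->have_addr &&
             nla_put(skb, MYATTR_ADDR, ETH_ALEN, st->addr)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
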
index 20618dd3088b79e0f1528436c7b1c9359f9c9cd6..93e9c6dc9ddfd5360d685843f0ebb271e102154a 100644 (file)
@@ -421,14 +421,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct chnl_net *priv;
        u8 loop;
        priv = netdev_priv(dev);
-       NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
-                   priv->conn_req.sockaddr.u.dgm.connection_id);
-       NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
-                   priv->conn_req.sockaddr.u.dgm.connection_id);
+       if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
+                       priv->conn_req.sockaddr.u.dgm.connection_id) ||
+           nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
+                       priv->conn_req.sockaddr.u.dgm.connection_id))
+               goto nla_put_failure;
        loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
-       NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
-
-
+       if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
+               goto nla_put_failure;
        return 0;
 nla_put_failure:
        return -EMSGSIZE;
index 6d6d7d25caaa30319870c6b1d62b38eb8972e543..beacdd93cd8fcd0fff5d95faff1415dd4e7beb67 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -36,6 +38,17 @@ u32 ethtool_op_get_link(struct net_device *dev)
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_SOFTWARE |
+               SOF_TIMESTAMPING_RX_SOFTWARE |
+               SOF_TIMESTAMPING_SOFTWARE;
+       info->phc_index = -1;
+       return 0;
+}
+EXPORT_SYMBOL(ethtool_op_get_ts_info);
+
 /* Handlers for each ethtool command */
 
 #define ETHTOOL_DEV_FEATURE_WORDS      ((NETDEV_FEATURE_COUNT + 31) / 32)
@@ -1278,6 +1291,40 @@ out:
        return ret;
 }
 
+static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
+{
+       int err = 0;
+       struct ethtool_ts_info info;
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       struct phy_device *phydev = dev->phydev;
+
+       memset(&info, 0, sizeof(info));
+       info.cmd = ETHTOOL_GET_TS_INFO;
+
+       if (phydev && phydev->drv && phydev->drv->ts_info) {
+
+               err = phydev->drv->ts_info(phydev, &info);
+
+       } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
+
+               err = ops->get_ts_info(dev, &info);
+
+       } else {
+               info.so_timestamping =
+                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                       SOF_TIMESTAMPING_SOFTWARE;
+               info.phc_index = -1;
+       }
+
+       if (err)
+               return err;
+
+       if (copy_to_user(useraddr, &info, sizeof(info)))
+               err = -EFAULT;
+
+       return err;
+}
+
 /* The main entry point in this file.  Called from net/core/dev.c */
 
 int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1295,11 +1342,13 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
                return -EFAULT;
 
        if (!dev->ethtool_ops) {
-               /* ETHTOOL_GDRVINFO does not require any driver support.
-                * It is also unprivileged and does not change anything,
-                * so we can take a shortcut to it. */
+               /* A few commands do not require any driver support,
+                * are unprivileged, and do not change anything, so we
+                * can take a shortcut to them. */
                if (ethcmd == ETHTOOL_GDRVINFO)
                        return ethtool_get_drvinfo(dev, useraddr);
+               else if (ethcmd == ETHTOOL_GET_TS_INFO)
+                       return ethtool_get_ts_info(dev, useraddr);
                else
                        return -EOPNOTSUPP;
        }
@@ -1330,6 +1379,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
        case ETHTOOL_GFEATURES:
+       case ETHTOOL_GET_TS_INFO:
                break;
        default:
                if (!capable(CAP_NET_ADMIN))
@@ -1496,6 +1546,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GET_DUMP_DATA:
                rc = ethtool_get_dump_data(dev, useraddr);
                break;
+       case ETHTOOL_GET_TS_INFO:
+               rc = ethtool_get_ts_info(dev, useraddr);
+               break;
        default:
                rc = -EOPNOTSUPP;
        }
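
For completeness, the new ETHTOOL_GET_TS_INFO command is reachable from user space through the usual SIOCETHTOOL ioctl. The stand-alone sketch below is illustrative only; it assumes headers that already carry struct ethtool_ts_info from this series and takes the interface name as argv[1]:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd;

        if (argc < 2)
                return 1;
        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&info;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GET_TS_INFO");
                close(fd);
                return 1;
        }
        /* phc_index is -1 when no PTP hardware clock is associated. */
        printf("so_timestamping=0x%x phc_index=%d\n",
               info.so_timestamping, info.phc_index);
        close(fd);
        return 0;
}
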
index c02e63c908da6972228735ba247dfcda6798b233..72cceb79d0d4f9bd869fa828a7d04152487be5c7 100644 (file)
@@ -542,7 +542,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        frh = nlmsg_data(nlh);
        frh->family = ops->family;
        frh->table = rule->table;
-       NLA_PUT_U32(skb, FRA_TABLE, rule->table);
+       if (nla_put_u32(skb, FRA_TABLE, rule->table))
+               goto nla_put_failure;
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
@@ -553,31 +554,28 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                frh->flags |= FIB_RULE_UNRESOLVED;
 
        if (rule->iifname[0]) {
-               NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);
-
+               if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
+                       goto nla_put_failure;
                if (rule->iifindex == -1)
                        frh->flags |= FIB_RULE_IIF_DETACHED;
        }
 
        if (rule->oifname[0]) {
-               NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);
-
+               if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
+                       goto nla_put_failure;
                if (rule->oifindex == -1)
                        frh->flags |= FIB_RULE_OIF_DETACHED;
        }
 
-       if (rule->pref)
-               NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
-
-       if (rule->mark)
-               NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);
-
-       if (rule->mark_mask || rule->mark)
-               NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
-
-       if (rule->target)
-               NLA_PUT_U32(skb, FRA_GOTO, rule->target);
-
+       if ((rule->pref &&
+            nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
+           (rule->mark &&
+            nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
+           ((rule->mark_mask || rule->mark) &&
+            nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
+           (rule->target &&
+            nla_put_u32(skb, FRA_GOTO, rule->target)))
+               goto nla_put_failure;
        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;
 
index 6f755cca45206934444464da8b8bcb0289921717..95d05a6012d18bf664466fa5feb613f6e9b651d6 100644 (file)
@@ -317,6 +317,9 @@ load_b:
                case BPF_S_ANC_CPU:
                        A = raw_smp_processor_id();
                        continue;
+               case BPF_S_ANC_ALU_XOR_X:
+                       A ^= X;
+                       continue;
                case BPF_S_ANC_NLATTR: {
                        struct nlattr *nla;
 
@@ -561,6 +564,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
                        ANCILLARY(HATYPE);
                        ANCILLARY(RXHASH);
                        ANCILLARY(CPU);
+                       ANCILLARY(ALU_XOR_X);
                        }
                }
                ftest->code = code;
@@ -589,6 +593,67 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
+static int __sk_prepare_filter(struct sk_filter *fp)
+{
+       int err;
+
+       fp->bpf_func = sk_run_filter;
+
+       err = sk_chk_filter(fp->insns, fp->len);
+       if (err)
+               return err;
+
+       bpf_jit_compile(fp);
+       return 0;
+}
+
+/**
+ *     sk_unattached_filter_create - create an unattached filter
+ *     @pfp: location where the created filter is returned
+ *     @fprog: the filter program
+ *
+ * Create a filter independent of any socket. We first run some
+ * sanity checks on it to make sure it does not explode on us later.
+ * If an error occurs or there is insufficient memory for the filter
+ * a negative errno code is returned. On success the return is zero.
+ */
+int sk_unattached_filter_create(struct sk_filter **pfp,
+                               struct sock_fprog *fprog)
+{
+       struct sk_filter *fp;
+       unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+       int err;
+
+       /* Make sure new filter is there and in the right amounts. */
+       if (fprog->filter == NULL)
+               return -EINVAL;
+
+       fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+       if (!fp)
+               return -ENOMEM;
+       memcpy(fp->insns, fprog->filter, fsize);
+
+       atomic_set(&fp->refcnt, 1);
+       fp->len = fprog->len;
+
+       err = __sk_prepare_filter(fp);
+       if (err)
+               goto free_mem;
+
+       *pfp = fp;
+       return 0;
+free_mem:
+       kfree(fp);
+       return err;
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+
+void sk_unattached_filter_destroy(struct sk_filter *fp)
+{
+       sk_filter_release(fp);
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+
 /**
  *     sk_attach_filter - attach a socket filter
  *     @fprog: the filter program
@@ -619,16 +684,13 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 
        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;
-       fp->bpf_func = sk_run_filter;
 
-       err = sk_chk_filter(fp->insns, fp->len);
+       err = __sk_prepare_filter(fp);
        if (err) {
                sk_filter_uncharge(sk, fp);
                return err;
        }
 
-       bpf_jit_compile(fp);
-
        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        rcu_assign_pointer(sk->sk_filter, fp);
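
Beyond the new BPF_S_ANC_ALU_XOR_X ancillary op (A ^= X), this file now lets other kernel code build socket filters that are not tied to a socket. A hedged sketch of how a caller could use the new API; the demo function, its drop policy and error codes are assumptions, not part of this series:

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>

/* Minimal sketch: create, run and destroy an unattached filter that
 * accepts every packet (classic BPF "return 0xffff"). */
static int demo_unattached_filter(struct sk_buff *skb)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffff),
        };
        struct sock_fprog prog = {
                .len    = ARRAY_SIZE(insns),
                .filter = insns,
        };
        struct sk_filter *fp;
        unsigned int res;
        int err;

        err = sk_unattached_filter_create(&fp, &prog);
        if (err)
                return err;

        res = SK_RUN_FILTER(fp, skb);   /* 0 would mean "drop" */

        sk_unattached_filter_destroy(fp);
        return res ? 0 : -EPERM;
}
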
index 0452eb27a2724dcd3782f5acb15be04e7efba47e..ddedf211e588146f17eb2e147307d04991720170 100644 (file)
@@ -27,7 +27,8 @@
 static inline int
 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
 {
-       NLA_PUT(d->skb, type, size, buf);
+       if (nla_put(d->skb, type, size, buf))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644 (file)
index 52d0a44..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/highmem.h>
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-       BUG_ON(in_irq());
-
-       local_bh_disable();
-#endif
-       return kmap_atomic(skb_frag_page(frag));
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-       kunmap_atomic(vaddr);
-#ifdef CONFIG_HIGHMEM
-       local_bh_enable();
-#endif
-}
index 0a68045782d18a635d8071a8ce673f4d1dd95379..ac71765d6fd0d2d696d4a4618a218fd54bc665ed 100644 (file)
@@ -1768,29 +1768,29 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
        if (nest == NULL)
                return -ENOBUFS;
 
-       if (parms->dev)
-               NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
-
-       NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
-       NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
-       /* approximative value for deprecated QUEUE_LEN (in packets) */
-       NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
-                   DIV_ROUND_UP(parms->queue_len_bytes,
-                                SKB_TRUESIZE(ETH_FRAME_LEN)));
-       NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
-       NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
-       NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
-       NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
-       NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
-       NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
-                     parms->base_reachable_time);
-       NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
-       NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
-       NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
-       NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
-       NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
-       NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
-
+       if ((parms->dev &&
+            nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
+           nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
+           nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+           /* approximative value for deprecated QUEUE_LEN (in packets) */
+           nla_put_u32(skb, NDTPA_QUEUE_LEN,
+                       DIV_ROUND_UP(parms->queue_len_bytes,
+                                    SKB_TRUESIZE(ETH_FRAME_LEN))) ||
+           nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
+           nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
+           nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
+           nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+           nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
+           nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
+                         parms->base_reachable_time) ||
+           nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+           nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
+                         parms->delay_probe_time) ||
+           nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
+           nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
+           nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
+           nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+               goto nla_put_failure;
        return nla_nest_end(skb, nest);
 
 nla_put_failure:
@@ -1815,12 +1815,12 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
        ndtmsg->ndtm_pad1   = 0;
        ndtmsg->ndtm_pad2   = 0;
 
-       NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
-       NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
-       NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
-       NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
-       NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
-
+       if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
+           nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
+           nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
+           nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
+           nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+               goto nla_put_failure;
        {
                unsigned long now = jiffies;
                unsigned int flush_delta = now - tbl->last_flush;
@@ -1841,7 +1841,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
                rcu_read_unlock_bh();
 
-               NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+               if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
+                       goto nla_put_failure;
        }
 
        {
@@ -1866,7 +1867,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
                }
 
-               NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+               if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
+                       goto nla_put_failure;
        }
 
        BUG_ON(tbl->parms.dev);
@@ -2137,7 +2139,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
        ndm->ndm_type    = neigh->type;
        ndm->ndm_ifindex = neigh->dev->ifindex;
 
-       NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
+       if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
+               goto nla_put_failure;
 
        read_lock_bh(&neigh->lock);
        ndm->ndm_state   = neigh->nud_state;
@@ -2157,8 +2160,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
        ci.ndm_refcnt    = atomic_read(&neigh->refcnt) - 1;
        read_unlock_bh(&neigh->lock);
 
-       NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
-       NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+       if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
+           nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
 
@@ -2187,7 +2191,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
        ndm->ndm_ifindex = pn->dev->ifindex;
        ndm->ndm_state   = NUD_NONE;
 
-       NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+       if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
 
index 495586232aa1d9adc46b1fe7e8352832c2312f21..97d0f2453a0e628f3eb8c7e674bf6d4e7159b665 100644 (file)
@@ -74,15 +74,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            int (*set)(struct net_device *, unsigned long))
 {
        struct net_device *net = to_net_dev(dev);
-       char *endp;
        unsigned long new;
        int ret = -EINVAL;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       new = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
+       ret = kstrtoul(buf, 0, &new);
+       if (ret)
                goto err;
 
        if (!rtnl_trylock())
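
The sysfs store path above drops simple_strtoul(), whose only failure signal was "no digits consumed", in favour of kstrtoul(), which also rejects trailing garbage and out-of-range values and returns a proper errno. A small illustrative sketch of the difference (the helper name is an assumption):

#include <linux/kernel.h>

/* "100\n"  -> 0, *val == 100 (one trailing newline is tolerated)
 * "100abc" -> -EINVAL, "99999999999999999999" -> -ERANGE,
 * where simple_strtoul() would have silently stopped at the junk. */
static int parse_sysfs_ulong(const char *buf, unsigned long *val)
{
        return kstrtoul(buf, 0, val);
}
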
index 90430b776ecef76ecb71b1ed4db964b541a382fc..545a969672ab778fdcced03da911b31a95ed6d2d 100644 (file)
@@ -607,7 +607,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
        for (i = 0; i < RTAX_MAX; i++) {
                if (metrics[i]) {
                        valid++;
-                       NLA_PUT_U32(skb, i+1, metrics[i]);
+                       if (nla_put_u32(skb, i+1, metrics[i]))
+                               goto nla_put_failure;
                }
        }
 
@@ -782,6 +783,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(4) /* IFLA_LINK */
               + nla_total_size(4) /* IFLA_MASTER */
+              + nla_total_size(4) /* IFLA_PROMISCUITY */
               + nla_total_size(1) /* IFLA_OPERSTATE */
               + nla_total_size(1) /* IFLA_LINKMODE */
               + nla_total_size(ext_filter_mask
@@ -807,7 +809,8 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
                vf_port = nla_nest_start(skb, IFLA_VF_PORT);
                if (!vf_port)
                        goto nla_put_failure;
-               NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
+               if (nla_put_u32(skb, IFLA_PORT_VF, vf))
+                       goto nla_put_failure;
                err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
                if (err == -EMSGSIZE)
                        goto nla_put_failure;
@@ -891,25 +894,23 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
        ifm->ifi_flags = dev_get_flags(dev);
        ifm->ifi_change = change;
 
-       NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-       NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
-       NLA_PUT_U8(skb, IFLA_OPERSTATE,
-                  netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
-       NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
-       NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-       NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
-
-       if (dev->ifindex != dev->iflink)
-               NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
-       if (dev->master)
-               NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
-
-       if (dev->qdisc)
-               NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
-
-       if (dev->ifalias)
-               NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+       if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+           nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
+           nla_put_u8(skb, IFLA_OPERSTATE,
+                      netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
+           nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
+           nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+           nla_put_u32(skb, IFLA_GROUP, dev->group) ||
+           nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+           (dev->ifindex != dev->iflink &&
+            nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+           (dev->master &&
+            nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+           (dev->qdisc &&
+            nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+           (dev->ifalias &&
+            nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
+               goto nla_put_failure;
 
        if (1) {
                struct rtnl_link_ifmap map = {
@@ -920,12 +921,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        .dma         = dev->dma,
                        .port        = dev->if_port,
                };
-               NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
+               if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+                       goto nla_put_failure;
        }
 
        if (dev->addr_len) {
-               NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-               NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
+               if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
+                   nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
+                       goto nla_put_failure;
        }
 
        attr = nla_reserve(skb, IFLA_STATS,
@@ -942,8 +945,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                goto nla_put_failure;
        copy_rtnl_link_stats64(nla_data(attr), stats);
 
-       if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
-               NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+       if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
+           nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
+               goto nla_put_failure;
 
        if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
            && (ext_filter_mask & RTEXT_FILTER_VF)) {
@@ -986,12 +990,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                                nla_nest_cancel(skb, vfinfo);
                                goto nla_put_failure;
                        }
-                       NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
-                       NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
-                       NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
-                               &vf_tx_rate);
-                       NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
-                               &vf_spoofchk);
+                       if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
+                           nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
+                           nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+                                   &vf_tx_rate) ||
+                           nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+                                   &vf_spoofchk))
+                               goto nla_put_failure;
                        nla_nest_end(skb, vf);
                }
                nla_nest_end(skb, vfinfo);
@@ -1113,6 +1118,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_PORT_SELF]        = { .type = NLA_NESTED },
        [IFLA_AF_SPEC]          = { .type = NLA_NESTED },
        [IFLA_EXT_MASK]         = { .type = NLA_U32 },
+       [IFLA_PROMISCUITY]      = { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
index e59840010d45c9bc25f521fe1ef0717d2de984af..35b3a685e34233469c39c8a3c1f8d0d79ba9d28a 100644 (file)
@@ -67,8 +67,7 @@
 
 #include <asm/uaccess.h>
 #include <trace/events/skb.h>
-
-#include "kmap_skb.h"
+#include <linux/highmem.h>
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -707,10 +706,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
                        }
                        return -ENOMEM;
                }
-               vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+               vaddr = kmap_atomic(skb_frag_page(f));
                memcpy(page_address(page),
                       vaddr + f->page_offset, skb_frag_size(f));
-               kunmap_skb_frag(vaddr);
+               kunmap_atomic(vaddr);
                page->private = (unsigned long)head;
                head = page;
        }
@@ -1487,21 +1486,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
+               skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
                WARN_ON(start > offset + len);
 
-               end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+               end = start + skb_frag_size(f);
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;
 
                        if (copy > len)
                                copy = len;
 
-                       vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+                       vaddr = kmap_atomic(skb_frag_page(f));
                        memcpy(to,
-                              vaddr + skb_shinfo(skb)->frags[i].page_offset+
-                              offset - start, copy);
-                       kunmap_skb_frag(vaddr);
+                              vaddr + f->page_offset + offset - start,
+                              copy);
+                       kunmap_atomic(vaddr);
 
                        if ((len -= copy) == 0)
                                return 0;
@@ -1806,10 +1806,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
                        if (copy > len)
                                copy = len;
 
-                       vaddr = kmap_skb_frag(frag);
+                       vaddr = kmap_atomic(skb_frag_page(frag));
                        memcpy(vaddr + frag->page_offset + offset - start,
                               from, copy);
-                       kunmap_skb_frag(vaddr);
+                       kunmap_atomic(vaddr);
 
                        if ((len -= copy) == 0)
                                return 0;
@@ -1869,21 +1869,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                WARN_ON(start > offset + len);
 
-               end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+               end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        u8 *vaddr;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                        if (copy > len)
                                copy = len;
-                       vaddr = kmap_skb_frag(frag);
+                       vaddr = kmap_atomic(skb_frag_page(frag));
                        csum2 = csum_partial(vaddr + frag->page_offset +
                                             offset - start, copy, 0);
-                       kunmap_skb_frag(vaddr);
+                       kunmap_atomic(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
                                return csum;
@@ -1955,12 +1955,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
                        if (copy > len)
                                copy = len;
-                       vaddr = kmap_skb_frag(frag);
+                       vaddr = kmap_atomic(skb_frag_page(frag));
                        csum2 = csum_partial_copy_nocheck(vaddr +
                                                          frag->page_offset +
                                                          offset - start, to,
                                                          copy, 0);
-                       kunmap_skb_frag(vaddr);
+                       kunmap_atomic(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
                                return csum;
@@ -2480,7 +2480,7 @@ next_skb:
 
                if (abs_offset < block_limit) {
                        if (!st->frag_data)
-                               st->frag_data = kmap_skb_frag(frag);
+                               st->frag_data = kmap_atomic(skb_frag_page(frag));
 
                        *data = (u8 *) st->frag_data + frag->page_offset +
                                (abs_offset - st->stepped_offset);
@@ -2489,7 +2489,7 @@ next_skb:
                }
 
                if (st->frag_data) {
-                       kunmap_skb_frag(st->frag_data);
+                       kunmap_atomic(st->frag_data);
                        st->frag_data = NULL;
                }
 
@@ -2498,7 +2498,7 @@ next_skb:
        }
 
        if (st->frag_data) {
-               kunmap_skb_frag(st->frag_data);
+               kunmap_atomic(st->frag_data);
                st->frag_data = NULL;
        }
 
@@ -2526,7 +2526,7 @@ EXPORT_SYMBOL(skb_seq_read);
 void skb_abort_seq_read(struct skb_seq_state *st)
 {
        if (st->frag_data)
-               kunmap_skb_frag(st->frag_data);
+               kunmap_atomic(st->frag_data);
 }
 EXPORT_SYMBOL(skb_abort_seq_read);
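
With net/core/kmap_skb.h removed, fragment access in skbuff.c uses the generic kmap_atomic()/kunmap_atomic() pair directly; the helper's extra local_bh_disable() under CONFIG_HIGHMEM is not carried over, presumably because the stack-based kmap_atomic() no longer relies on the per-type KM_* slots that needed it. A hedged sketch of the resulting idiom (the helper name is illustrative):

#include <linux/skbuff.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one skb fragment into a linear buffer 'to'. */
static void copy_frag_linear(const skb_frag_t *frag, void *to)
{
        /* kmap_atomic() maps the (possibly highmem) page and disables
         * preemption for the duration of the copy. */
        u8 *vaddr = kmap_atomic(skb_frag_page(frag));

        memcpy(to, vaddr + frag->page_offset, skb_frag_size(frag));
        kunmap_atomic(vaddr);
}
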
 
index dc3c3faff2f4a70e878953dc8ba910308182b728..39895a65e54ae59d35144c656c37483b43efcf87 100644 (file)
@@ -58,14 +58,11 @@ __be32 in_aton(const char *str)
        int i;
 
        l = 0;
-       for (i = 0; i < 4; i++)
-       {
+       for (i = 0; i < 4; i++) {
                l <<= 8;
-               if (*str != '\0')
-               {
+               if (*str != '\0') {
                        val = 0;
-                       while (*str != '\0' && *str != '.' && *str != '\n')
-                       {
+                       while (*str != '\0' && *str != '.' && *str != '\n') {
                                val *= 10;
                                val += *str - '0';
                                str++;
index d86053002c16b896fdea34bd174b37e6c4aa3054..8dfa1da7c40d0962bd960987c4734d6f22307591 100644 (file)
@@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
        [DCB_ATTR_IEEE_ETS]         = {.len = sizeof(struct ieee_ets)},
        [DCB_ATTR_IEEE_PFC]         = {.len = sizeof(struct ieee_pfc)},
        [DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
+       [DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
 };
 
 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -1205,13 +1206,15 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
                if (!app)
                        goto nla_put_failure;
 
-               if (app_info_type)
-                       NLA_PUT(skb, app_info_type, sizeof(info), &info);
-
-               for (i = 0; i < app_count; i++)
-                       NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
-                               &table[i]);
+               if (app_info_type &&
+                   nla_put(skb, app_info_type, sizeof(info), &info))
+                       goto nla_put_failure;
 
+               for (i = 0; i < app_count; i++) {
+                       if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
+                                   &table[i]))
+                               goto nla_put_failure;
+               }
                nla_nest_end(skb, app);
        }
        err = 0;
@@ -1230,8 +1233,8 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
        int dcbx;
        int err = -EMSGSIZE;
 
-       NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+       if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+               goto nla_put_failure;
        ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
        if (!ieee)
                goto nla_put_failure;
@@ -1239,15 +1242,28 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
        if (ops->ieee_getets) {
                struct ieee_ets ets;
                err = ops->ieee_getets(netdev, &ets);
-               if (!err)
-                       NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+               if (!err &&
+                   nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
+                       goto nla_put_failure;
+       }
+
+       if (ops->ieee_getmaxrate) {
+               struct ieee_maxrate maxrate;
+               err = ops->ieee_getmaxrate(netdev, &maxrate);
+               if (!err) {
+                       err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
+                                     sizeof(maxrate), &maxrate);
+                       if (err)
+                               goto nla_put_failure;
+               }
        }
 
        if (ops->ieee_getpfc) {
                struct ieee_pfc pfc;
                err = ops->ieee_getpfc(netdev, &pfc);
-               if (!err)
-                       NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+               if (!err &&
+                   nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
+                       goto nla_put_failure;
        }
 
        app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
@@ -1278,15 +1294,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
        if (ops->ieee_peer_getets) {
                struct ieee_ets ets;
                err = ops->ieee_peer_getets(netdev, &ets);
-               if (!err)
-                       NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+               if (!err &&
+                   nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
+                       goto nla_put_failure;
        }
 
        if (ops->ieee_peer_getpfc) {
                struct ieee_pfc pfc;
                err = ops->ieee_peer_getpfc(netdev, &pfc);
-               if (!err)
-                       NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+               if (!err &&
+                   nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
+                       goto nla_put_failure;
        }
 
        if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1340,10 +1358,11 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
                        ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
                                          &prio, &pgid, &tc_pct, &up_map);
 
-               NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
-               NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
-               NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
-               NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+               if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
+                   nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
+                   nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
+                   nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
+                       goto nla_put_failure;
                nla_nest_end(skb, tc_nest);
        }
 
@@ -1356,7 +1375,8 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
                else
                        ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
                                           &tc_pct);
-               NLA_PUT_U8(skb, i, tc_pct);
+               if (nla_put_u8(skb, i, tc_pct))
+                       goto nla_put_failure;
        }
        nla_nest_end(skb, pg);
        return 0;
@@ -1373,8 +1393,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
        int dcbx, i, err = -EMSGSIZE;
        u8 value;
 
-       NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+       if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+               goto nla_put_failure;
        cee = nla_nest_start(skb, DCB_ATTR_CEE);
        if (!cee)
                goto nla_put_failure;
@@ -1401,7 +1421,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 
                for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
                        ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
-                       NLA_PUT_U8(skb, i, value);
+                       if (nla_put_u8(skb, i, value))
+                               goto nla_put_failure;
                }
                nla_nest_end(skb, pfc_nest);
        }
@@ -1454,8 +1475,9 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 
                for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
                     i++)
-                       if (!ops->getfeatcfg(netdev, i, &value))
-                               NLA_PUT_U8(skb, i, value);
+                       if (!ops->getfeatcfg(netdev, i, &value) &&
+                           nla_put_u8(skb, i, value))
+                               goto nla_put_failure;
 
                nla_nest_end(skb, feat);
        }
@@ -1464,15 +1486,17 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
        if (ops->cee_peer_getpg) {
                struct cee_pg pg;
                err = ops->cee_peer_getpg(netdev, &pg);
-               if (!err)
-                       NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+               if (!err &&
+                   nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
+                       goto nla_put_failure;
        }
 
        if (ops->cee_peer_getpfc) {
                struct cee_pfc pfc;
                err = ops->cee_peer_getpfc(netdev, &pfc);
-               if (!err)
-                       NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+               if (!err &&
+                   nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
+                       goto nla_put_failure;
        }
 
        if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1589,6 +1613,14 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
                        goto err;
        }
 
+       if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
+               struct ieee_maxrate *maxrate =
+                       nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
+               err = ops->ieee_setmaxrate(netdev, maxrate);
+               if (err)
+                       goto err;
+       }
+
        if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
                struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
                err = ops->ieee_setpfc(netdev, pfc);
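
A driver opts into the new DCB_ATTR_IEEE_MAXRATE attribute by providing ieee_getmaxrate/ieee_setmaxrate callbacks in its dcbnl_rtnl_ops. The sketch below is a hedged illustration only; the my_priv layout and the pure software store are assumptions, not how any in-tree driver does it:

#include <net/dcbnl.h>
#include <linux/netdevice.h>

struct my_priv {                                /* illustrative only */
        u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
};

static int my_ieee_getmaxrate(struct net_device *dev,
                              struct ieee_maxrate *maxrate)
{
        struct my_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                maxrate->tc_maxrate[i] = priv->tc_maxrate[i];
        return 0;
}

static int my_ieee_setmaxrate(struct net_device *dev,
                              struct ieee_maxrate *maxrate)
{
        struct my_priv *priv = netdev_priv(dev);
        int i;

        /* A real driver would program its per-TC rate limiters here. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                priv->tc_maxrate[i] = maxrate->tc_maxrate[i];
        return 0;
}

static const struct dcbnl_rtnl_ops my_dcbnl_ops = {
        .ieee_getmaxrate = my_ieee_getmaxrate,
        .ieee_setmaxrate = my_ieee_setmaxrate,
};
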
index c00e3077988c07005a9633f859c1b76a54018ca8..a4aecb09d12aac0717b934b486a85c77412635e5 100644 (file)
@@ -694,13 +694,13 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
        ifm->ifa_scope = ifa->ifa_scope;
        ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
 
-       if (ifa->ifa_address)
-               NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
-       if (ifa->ifa_local)
-               NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
-       if (ifa->ifa_label[0])
-               NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
-
+       if ((ifa->ifa_address &&
+            nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+           (ifa->ifa_local &&
+            nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
+           (ifa->ifa_label[0] &&
+            nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+               goto nla_put_failure;
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
index f65c9ddaee41cde5bdc2ba5780ea8f4cf40928f3..7399e3d519225763774e46a4afdaab1bf9408d9d 100644 (file)
@@ -204,11 +204,11 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        frh->src_len = r->src_len;
        frh->tos = 0;
 
-       if (r->dst_len)
-               NLA_PUT_LE16(skb, FRA_DST, r->dst);
-       if (r->src_len)
-               NLA_PUT_LE16(skb, FRA_SRC, r->src);
-
+       if ((r->dst_len &&
+            nla_put_le16(skb, FRA_DST, r->dst)) ||
+           (r->src_len &&
+            nla_put_le16(skb, FRA_SRC, r->src)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index adaf46214905964f76bef59f7f660808c982054d..ca92587720f41fc60b3e973ed3bb5192c3344047 100644 (file)
@@ -63,15 +63,14 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-
-       NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-                       addr->hwaddr);
-
-       NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+                   addr->hwaddr) ||
+           nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
+               goto nla_put_failure;
 
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
@@ -92,14 +91,13 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-
-       NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
-       NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
+           nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+               goto nla_put_failure;
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -119,20 +117,22 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev,
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-
-       if (addr->addr_type == IEEE802154_ADDR_LONG)
-               NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-                               addr->hwaddr);
-       else
-               NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
-                               addr->short_addr);
-
-       NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr))
+               goto nla_put_failure;
+       if (addr->addr_type == IEEE802154_ADDR_LONG) {
+               if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+                           addr->hwaddr))
+                       goto nla_put_failure;
+       } else {
+               if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
+                               addr->short_addr))
+                       goto nla_put_failure;
+       }
+       if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
+               goto nla_put_failure;
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -151,13 +151,12 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-
-       NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+               goto nla_put_failure;
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -177,13 +176,13 @@ int ieee802154_nl_beacon_indic(struct net_device *dev,
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-       NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
-       NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
+           nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
+               goto nla_put_failure;
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -204,19 +203,17 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-
-       NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-       NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
-       NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
-
-       if (edl)
-               NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
+           nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
+           nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
+           nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
+           (edl &&
+            nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
+               goto nla_put_failure;
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -235,13 +232,12 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
        if (!msg)
                return -ENOBUFS;
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-                       dev->dev_addr);
-
-       NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+               goto nla_put_failure;
        return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -266,16 +262,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
        phy = ieee802154_mlme_ops(dev)->get_phy(dev);
        BUG_ON(!phy);
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-       NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-
-       NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-               dev->dev_addr);
-       NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
-               ieee802154_mlme_ops(dev)->get_short_addr(dev));
-       NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
-               ieee802154_mlme_ops(dev)->get_pan_id(dev));
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+                   dev->dev_addr) ||
+           nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
+                       ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
+           nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
+                       ieee802154_mlme_ops(dev)->get_pan_id(dev)))
+               goto nla_put_failure;
        wpan_phy_put(phy);
        return genlmsg_end(msg, hdr);
 
index c64a38d57aa36c4b521fd06c7a9c8c46bd0f337b..3bdc4303c339ea6cd6ebc0995f014382ae43d8d2 100644 (file)
@@ -53,18 +53,18 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
                goto out;
 
        mutex_lock(&phy->pib_lock);
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-
-       NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page);
-       NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel);
+       if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+           nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
+           nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+               goto nla_put_failure;
        for (i = 0; i < 32; i++) {
                if (phy->channels_supported[i])
                        buf[pages++] = phy->channels_supported[i] | (i << 27);
        }
-       if (pages)
-               NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
-                               pages * sizeof(uint32_t), buf);
-
+       if (pages &&
+           nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
+                   pages * sizeof(uint32_t), buf))
+               goto nla_put_failure;
        mutex_unlock(&phy->pib_lock);
        kfree(buf);
        return genlmsg_end(msg, hdr);
@@ -245,9 +245,9 @@ static int ieee802154_add_iface(struct sk_buff *skb,
                        goto dev_unregister;
        }
 
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+               goto nla_put_failure;
        dev_put(dev);
 
        wpan_phy_put(phy);
@@ -333,10 +333,9 @@ static int ieee802154_del_iface(struct sk_buff *skb,
 
        rtnl_unlock();
 
-
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-       NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name);
-
+       if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
+               goto nla_put_failure;
        wpan_phy_put(phy);
 
        return ieee802154_nl_reply(msg, info);
index 6e447ff94dfa5881876ca1a1a86f5f0f1df3b9ae..7ba2196e437757d28485e92ec1ada4e1ca4699d2 100644 (file)
@@ -1266,17 +1266,15 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
        ifm->ifa_scope = ifa->ifa_scope;
        ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
 
-       if (ifa->ifa_address)
-               NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address);
-
-       if (ifa->ifa_local)
-               NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local);
-
-       if (ifa->ifa_broadcast)
-               NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
-
-       if (ifa->ifa_label[0])
-               NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
+       if ((ifa->ifa_address &&
+            nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+           (ifa->ifa_local &&
+            nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
+           (ifa->ifa_broadcast &&
+            nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
+           (ifa->ifa_label[0] &&
+            nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
 
index 799fc790b3cfa67cc170799c89416d5b9da5671c..2d043f71ef7051f606f133a637dd9de1862644b3 100644 (file)
@@ -221,15 +221,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        frh->src_len = rule4->src_len;
        frh->tos = rule4->tos;
 
-       if (rule4->dst_len)
-               NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
-
-       if (rule4->src_len)
-               NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-
+       if ((rule4->dst_len &&
+            nla_put_be32(skb, FRA_DST, rule4->dst)) ||
+           (rule4->src_len &&
+            nla_put_be32(skb, FRA_SRC, rule4->src)))
+               goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       if (rule4->tclassid)
-               NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
+       if (rule4->tclassid &&
+           nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
+               goto nla_put_failure;
 #endif
        return 0;
 
index 5063fa38ac7becd71bf0eea737e596f9567b48a5..a8bdf7405433f55adcd0ef540dea1f0cf858c5f9 100644 (file)
@@ -931,33 +931,36 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                rtm->rtm_table = tb_id;
        else
                rtm->rtm_table = RT_TABLE_COMPAT;
-       NLA_PUT_U32(skb, RTA_TABLE, tb_id);
+       if (nla_put_u32(skb, RTA_TABLE, tb_id))
+               goto nla_put_failure;
        rtm->rtm_type = type;
        rtm->rtm_flags = fi->fib_flags;
        rtm->rtm_scope = fi->fib_scope;
        rtm->rtm_protocol = fi->fib_protocol;
 
-       if (rtm->rtm_dst_len)
-               NLA_PUT_BE32(skb, RTA_DST, dst);
-
-       if (fi->fib_priority)
-               NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);
-
+       if (rtm->rtm_dst_len &&
+           nla_put_be32(skb, RTA_DST, dst))
+               goto nla_put_failure;
+       if (fi->fib_priority &&
+           nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
+               goto nla_put_failure;
        if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
                goto nla_put_failure;
 
-       if (fi->fib_prefsrc)
-               NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);
-
+       if (fi->fib_prefsrc &&
+           nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
+               goto nla_put_failure;
        if (fi->fib_nhs == 1) {
-               if (fi->fib_nh->nh_gw)
-                       NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);
-
-               if (fi->fib_nh->nh_oif)
-                       NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
+               if (fi->fib_nh->nh_gw &&
+                   nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
+                       goto nla_put_failure;
+               if (fi->fib_nh->nh_oif &&
+                   nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
+                       goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-               if (fi->fib_nh[0].nh_tclassid)
-                       NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
+               if (fi->fib_nh[0].nh_tclassid &&
+                   nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+                       goto nla_put_failure;
 #endif
        }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -978,11 +981,13 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                        rtnh->rtnh_hops = nh->nh_weight - 1;
                        rtnh->rtnh_ifindex = nh->nh_oif;
 
-                       if (nh->nh_gw)
-                               NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
+                       if (nh->nh_gw &&
+                           nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
+                               goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-                       if (nh->nh_tclassid)
-                               NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
+                       if (nh->nh_tclassid &&
+                           nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+                               goto nla_put_failure;
 #endif
                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
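
[Editor's note: for attributes that were only emitted when a field is set, the conversion folds each "(condition && nla_put_...())" term into a single if, as in the inet_fill_ifaddr() and fib_dump_info() hunks above. A sketch of that variant, again with assumed EXAMPLE_ATTR_* names rather than anything from this merge:]

#include <linux/errno.h>
#include <net/netlink.h>

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_ADDRESS,	/* __be32, emitted only if nonzero */
	EXAMPLE_ATTR_LABEL,	/* string, emitted only if non-empty */
};

static int example_fill_optional(struct sk_buff *skb, __be32 addr,
				 const char *label)
{
	/* Old form:
	 *	if (addr)
	 *		NLA_PUT_BE32(skb, EXAMPLE_ATTR_ADDRESS, addr);
	 *	if (label[0])
	 *		NLA_PUT_STRING(skb, EXAMPLE_ATTR_LABEL, label);
	 * New form: short-circuit && preserves the "only if set" semantics,
	 * || collects the error from whichever put fails first.
	 */
	if ((addr &&
	     nla_put_be32(skb, EXAMPLE_ATTR_ADDRESS, addr)) ||
	    (label[0] &&
	     nla_put_string(skb, EXAMPLE_ATTR_LABEL, label)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
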
index 5dfecfd7d5e9fb9cfe154ad21eb93de60a3fd152..ceaac24ecdca2d666d67838da3137a87e9a21113 100644 (file)
@@ -774,7 +774,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
                        if (psf->sf_count[MCAST_INCLUDE] ||
                            pmc->sfcount[MCAST_EXCLUDE] !=
                            psf->sf_count[MCAST_EXCLUDE])
-                               continue;
+                               break;
                        if (srcs[i] == psf->sf_inaddr) {
                                scount++;
                                break;
index b57532d4742c7cccc95a05dd641bfc47875b4717..02d07c6f630fff6c76e817fd1fc22715fb1c5939 100644 (file)
@@ -1654,17 +1654,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;
 
-       NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
-       NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
-       NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
-       NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
-       NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
-       NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
-       NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
-       NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
-       NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
-       NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));
-
+       if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+           nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+           nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+           nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+           nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+           nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
+           nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
+           nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
+           nla_put_u8(skb, IFLA_GRE_PMTUDISC,
+                      !!(p->iph.frag_off & htons(IP_DF))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 960fbfc3e976f5c8c65a2a95c8d3bd5946c0ec33..5bef604ac0fad82714f5dbf64ed3d1a56993e864 100644 (file)
@@ -2119,15 +2119,16 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        rtm->rtm_src_len  = 32;
        rtm->rtm_tos      = 0;
        rtm->rtm_table    = mrt->id;
-       NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+       if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+               goto nla_put_failure;
        rtm->rtm_type     = RTN_MULTICAST;
        rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = RTPROT_UNSPEC;
        rtm->rtm_flags    = 0;
 
-       NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
-       NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
-
+       if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
+           nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+               goto nla_put_failure;
        if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
                goto nla_put_failure;
 
index cf73cc70ed2d2e1bfe1a5c837bc9993358e9904e..345c7dc08482d43cd69c609c8c2728e33735d7a3 100644 (file)
@@ -311,8 +311,9 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
                                const struct nf_conntrack_tuple *tuple)
 {
-       NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip);
-       NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip);
+       if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+           nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 7cbe9cb261c29d44907760b7d51f0d95a9d64b27..0847e373d33c4d1a1ae2291e8ddbf71e9b9cd906 100644 (file)
@@ -228,10 +228,10 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
 static int icmp_tuple_to_nlattr(struct sk_buff *skb,
                                const struct nf_conntrack_tuple *t)
 {
-       NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id);
-       NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type);
-       NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code);
-
+       if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
+           nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
+           nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -293,8 +293,8 @@ icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeout = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
-
+       if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 167ea10b521a8267b95bff128011b53d5cbb32c2..e4d18f2a305d46aba7f2b3196de93ed039b8e178 100644 (file)
@@ -229,7 +229,7 @@ const __u8 ip_tos2prio[16] = {
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK)
 };
-
+EXPORT_SYMBOL(ip_tos2prio);
 
 /*
  * Route cache.
@@ -2972,7 +2972,8 @@ static int rt_fill_info(struct net *net,
        r->rtm_src_len  = 0;
        r->rtm_tos      = rt->rt_key_tos;
        r->rtm_table    = RT_TABLE_MAIN;
-       NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
+       if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
+               goto nla_put_failure;
        r->rtm_type     = rt->rt_type;
        r->rtm_scope    = RT_SCOPE_UNIVERSE;
        r->rtm_protocol = RTPROT_UNSPEC;
@@ -2980,31 +2981,38 @@ static int rt_fill_info(struct net *net,
        if (rt->rt_flags & RTCF_NOTIFY)
                r->rtm_flags |= RTM_F_NOTIFY;
 
-       NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
-
+       if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
+               goto nla_put_failure;
        if (rt->rt_key_src) {
                r->rtm_src_len = 32;
-               NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
+               if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
+                       goto nla_put_failure;
        }
-       if (rt->dst.dev)
-               NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
+       if (rt->dst.dev &&
+           nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+               goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       if (rt->dst.tclassid)
-               NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
+       if (rt->dst.tclassid &&
+           nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
+               goto nla_put_failure;
 #endif
-       if (rt_is_input_route(rt))
-               NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
-       else if (rt->rt_src != rt->rt_key_src)
-               NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
-
-       if (rt->rt_dst != rt->rt_gateway)
-               NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
+       if (rt_is_input_route(rt)) {
+               if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
+                       goto nla_put_failure;
+       } else if (rt->rt_src != rt->rt_key_src) {
+               if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
+                       goto nla_put_failure;
+       }
+       if (rt->rt_dst != rt->rt_gateway &&
+           nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+               goto nla_put_failure;
 
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
-       if (rt->rt_mark)
-               NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
+       if (rt->rt_mark &&
+           nla_put_be32(skb, RTA_MARK, rt->rt_mark))
+               goto nla_put_failure;
 
        error = rt->dst.error;
        if (peer) {
@@ -3045,7 +3053,8 @@ static int rt_fill_info(struct net *net,
                        }
                } else
 #endif
-                       NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
+                       if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+                               goto nla_put_failure;
        }
 
        if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
index 6a3bb6077e19715bf2732bbce3d1c09d06aed104..fcd230a6235a5a2137bac64bfb6aaf33d8cc600b 100644 (file)
@@ -336,10 +336,9 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
        snmp6_free_dev(idev);
        kfree_rcu(idev, rcu);
 }
-
 EXPORT_SYMBOL(in6_dev_finish_destroy);
 
-static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
+static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 {
        struct inet6_dev *ndev;
 
@@ -441,7 +440,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
        return ndev;
 }
 
-static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
+static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
 {
        struct inet6_dev *idev;
 
@@ -1333,7 +1332,6 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
        rcu_read_unlock();
        return onlink;
 }
-
 EXPORT_SYMBOL(ipv6_chk_prefix);
 
 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
@@ -1523,7 +1521,7 @@ static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
        if (dev->addr_len != ARCNET_ALEN)
                return -1;
        memset(eui, 0, 7);
-       eui[7] = *(u8*)dev->dev_addr;
+       eui[7] = *(u8 *)dev->dev_addr;
        return 0;
 }
 
@@ -1668,7 +1666,8 @@ out:
        in6_dev_put(idev);
 }
 
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) {
+static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+{
        int ret = 0;
 
        if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
@@ -1911,7 +1910,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
        /* Try to figure out our local address for this prefix */
 
        if (pinfo->autoconf && in6_dev->cnf.autoconf) {
-               struct inet6_ifaddr * ifp;
+               struct inet6_ifaddr *ifp;
                struct in6_addr addr;
                int create = 0, update_lft = 0;
 
@@ -2365,9 +2364,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
        }
 
        for_each_netdev(net, dev) {
-               struct in_device * in_dev = __in_dev_get_rtnl(dev);
+               struct in_device *in_dev = __in_dev_get_rtnl(dev);
                if (in_dev && (dev->flags & IFF_UP)) {
-                       struct in_ifaddr * ifa;
+                       struct in_ifaddr *ifa;
 
                        int flag = scope;
 
@@ -2413,7 +2412,7 @@ static void init_loopback(struct net_device *dev)
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
 {
-       struct inet6_ifaddr * ifp;
+       struct inet6_ifaddr *ifp;
        u32 addr_flags = IFA_F_PERMANENT;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -2434,7 +2433,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
 static void addrconf_dev_config(struct net_device *dev)
 {
        struct in6_addr addr;
-       struct inet6_dev    * idev;
+       struct inet6_dev *idev;
 
        ASSERT_RTNL();
 
@@ -2570,7 +2569,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
 }
 
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
-                          void * data)
+                          void *data)
 {
        struct net_device *dev = (struct net_device *) data;
        struct inet6_dev *idev = __in6_dev_get(dev);
@@ -3794,7 +3793,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
        return inet6_dump_addr(skb, cb, type);
 }
 
-static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
+static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                             void *arg)
 {
        struct net *net = sock_net(in_skb->sk);
@@ -3989,14 +3988,14 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
        struct nlattr *nla;
        struct ifla_cacheinfo ci;
 
-       NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
-
+       if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+               goto nla_put_failure;
        ci.max_reasm_len = IPV6_MAXPLEN;
        ci.tstamp = cstamp_delta(idev->tstamp);
        ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
        ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
-       NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-
+       if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
        nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
        if (nla == NULL)
                goto nla_put_failure;
@@ -4061,15 +4060,13 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
        hdr->ifi_flags = dev_get_flags(dev);
        hdr->ifi_change = 0;
 
-       NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-
-       if (dev->addr_len)
-               NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
-       NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-       if (dev->ifindex != dev->iflink)
-               NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
+       if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+           (dev->addr_len &&
+            nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+           nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+           (dev->ifindex != dev->iflink &&
+            nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+               goto nla_put_failure;
        protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
        if (protoinfo == NULL)
                goto nla_put_failure;
@@ -4182,12 +4179,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
        if (pinfo->autoconf)
                pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
 
-       NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
-
+       if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
+               goto nla_put_failure;
        ci.preferred_time = ntohl(pinfo->prefered);
        ci.valid_time = ntohl(pinfo->valid);
-       NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
-
+       if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
index 399287e595d7cb74c9018009305710077d30827a..7981bde575755f22d08b017af1c35cd0b1c8ffce 100644 (file)
@@ -10,7 +10,7 @@
 
 static inline unsigned ipv6_addr_scope2type(unsigned scope)
 {
-       switch(scope) {
+       switch (scope) {
        case IPV6_ADDR_SCOPE_NODELOCAL:
                return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
                        IPV6_ADDR_LOOPBACK);
index 76832c8dc89dae671905728c391ea827dabc613f..f6210d6fd7d809a7ff45d24e53d4f1254b8c6752 100644 (file)
@@ -98,7 +98,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                sin.sin_port = usin->sin6_port;
 
                err = ip4_datagram_connect(sk,
-                                          (struct sockaddr*) &sin,
+                                          (struct sockaddr *) &sin,
                                           sizeof(sin));
 
 ipv4_connected:
@@ -518,7 +518,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                        unsigned len;
                        u8 *ptr = nh + off;
 
-                       switch(nexthdr) {
+                       switch (nexthdr) {
                        case IPPROTO_DSTOPTS:
                                nexthdr = ptr[0];
                                len = (ptr[1] + 1) << 3;
@@ -827,9 +827,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
                        int tc;
 
                        err = -EINVAL;
-                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
                                goto exit_f;
-                       }
 
                        tc = *(int *)CMSG_DATA(cmsg);
                        if (tc < -1 || tc > 0xff)
@@ -846,9 +845,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
                        int df;
 
                        err = -EINVAL;
-                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
                                goto exit_f;
-                       }
 
                        df = *(int *)CMSG_DATA(cmsg);
                        if (df < 0 || df > 1)
index 3d641b6e9b09256cb43f1cca0e9cabcc1526e30f..aa0a51e64682ac4a9e8db3aa9e57f1c02f227f66 100644 (file)
@@ -153,6 +153,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
 
        while (len > 0) {
                int optlen = nh[off + 1] + 2;
+               int i;
 
                switch (nh[off]) {
                case IPV6_TLV_PAD0:
@@ -160,6 +161,21 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
                        break;
 
                case IPV6_TLV_PADN:
+                       /* RFC 2460 states that the purpose of PadN is
+                        * to align the containing header to multiples
+                        * of 8. 7 is therefore the highest valid value.
+                        * See also RFC 4942, Section 2.1.9.5.
+                        */
+                       if (optlen > 7)
+                               goto bad;
+                       /* RFC 4942 recommends receiving hosts to
+                        * actively check PadN payload to contain
+                        * only zeroes.
+                        */
+                       for (i = 2; i < optlen; i++) {
+                               if (nh[off + i] != 0)
+                                       goto bad;
+                       }
                        break;
 
                default: /* Other TLV code so scan list */
@@ -722,7 +738,6 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
        if (opt->hopopt)
                ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
 }
-
 EXPORT_SYMBOL(ipv6_push_nfrag_opts);
 
 void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
@@ -738,20 +753,19 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
 
        opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
        if (opt2) {
-               long dif = (char*)opt2 - (char*)opt;
+               long dif = (char *)opt2 - (char *)opt;
                memcpy(opt2, opt, opt->tot_len);
                if (opt2->hopopt)
-                       *((char**)&opt2->hopopt) += dif;
+                       *((char **)&opt2->hopopt) += dif;
                if (opt2->dst0opt)
-                       *((char**)&opt2->dst0opt) += dif;
+                       *((char **)&opt2->dst0opt) += dif;
                if (opt2->dst1opt)
-                       *((char**)&opt2->dst1opt) += dif;
+                       *((char **)&opt2->dst1opt) += dif;
                if (opt2->srcrt)
-                       *((char**)&opt2->srcrt) += dif;
+                       *((char **)&opt2->srcrt) += dif;
        }
        return opt2;
 }
-
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
 static int ipv6_renew_option(void *ohdr,
@@ -892,5 +906,4 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
        fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
        return orig;
 }
-
 EXPORT_SYMBOL_GPL(fl6_update_dst);
index 72957f4a7c6c9f6c24056a5c3eb6046b61c7a3ea..7b1a884634d5b4c5c8aedb4dc6439df141078e9d 100644 (file)
@@ -21,6 +21,7 @@ int ipv6_ext_hdr(u8 nexthdr)
                 (nexthdr == NEXTHDR_NONE)      ||
                 (nexthdr == NEXTHDR_DEST);
 }
+EXPORT_SYMBOL(ipv6_ext_hdr);
 
 /*
  * Skip any extension headers. This is used by the ICMP module.
@@ -109,6 +110,4 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
        *nexthdrp = nexthdr;
        return start;
 }
-
-EXPORT_SYMBOL(ipv6_ext_hdr);
 EXPORT_SYMBOL(ipv6_skip_exthdr);
index b6c573152067c36c90841a35ba191b611d133acc..0ff1cfd55bc4949fc02bf9dcef30fca58727c308 100644 (file)
@@ -22,8 +22,7 @@
 #include <net/ip6_route.h>
 #include <net/netlink.h>
 
-struct fib6_rule
-{
+struct fib6_rule {
        struct fib_rule         common;
        struct rt6key           src;
        struct rt6key           dst;
@@ -215,14 +214,13 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        frh->src_len = rule6->src.plen;
        frh->tos = rule6->tclass;
 
-       if (rule6->dst.plen)
-               NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr),
-                       &rule6->dst.addr);
-
-       if (rule6->src.plen)
-               NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr),
-                       &rule6->src.addr);
-
+       if ((rule6->dst.plen &&
+            nla_put(skb, FRA_DST, sizeof(struct in6_addr),
+                    &rule6->dst.addr)) ||
+           (rule6->src.plen &&
+            nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
+                    &rule6->src.addr)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 27ac95a6342927e5365af6616af82815e603619e..cc079d8d4681aee942fdcfc6a93046a151e06e95 100644 (file)
@@ -498,7 +498,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        err = ip6_append_data(sk, icmpv6_getfrag, &msg,
                              len + sizeof(struct icmp6hdr),
                              sizeof(struct icmp6hdr), hlimit,
-                             np->tclass, NULL, &fl6, (struct rt6_info*)dst,
+                             np->tclass, NULL, &fl6, (struct rt6_info *)dst,
                              MSG_DONTWAIT, np->dontfrag);
        if (err) {
                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
@@ -579,7 +579,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 
        err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
                                sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
-                               (struct rt6_info*)dst, MSG_DONTWAIT,
+                               (struct rt6_info *)dst, MSG_DONTWAIT,
                                np->dontfrag);
 
        if (err) {
@@ -950,7 +950,6 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
 
        return fatal;
 }
-
 EXPORT_SYMBOL(icmpv6_err_convert);
 
 #ifdef CONFIG_SYSCTL
index 8110362e0af558a9ff77824bc78c1b3677369855..efc0098b59dd34b9dabd07ae84f8aa9a1888ce1a 100644 (file)
@@ -2215,14 +2215,15 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        rtm->rtm_src_len  = 128;
        rtm->rtm_tos      = 0;
        rtm->rtm_table    = mrt->id;
-       NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+       if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+               goto nla_put_failure;
        rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = RTPROT_UNSPEC;
        rtm->rtm_flags    = 0;
 
-       NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
-       NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
-
+       if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
+           nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
+               goto nla_put_failure;
        if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
                goto nla_put_failure;
 
index 63dd1f89ed7deaed484382e6a82b66233e60dd6b..ca1af0760c4cfd28f330f888a0ae8f959afd455c 100644 (file)
@@ -678,7 +678,6 @@ done:
        }
        case MCAST_MSFILTER:
        {
-               extern int sysctl_mld_max_msf;
                struct group_filter *gsf;
 
                if (optlen < GROUP_FILTER_SIZE(0))
index b2869cab2092ae2d08e6b090c98fbe6aece35e75..7dfb89f2bae5c54bb40554e5e9975d00fcd9add7 100644 (file)
@@ -1061,7 +1061,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
                        if (psf->sf_count[MCAST_INCLUDE] ||
                            pmc->mca_sfcount[MCAST_EXCLUDE] !=
                            psf->sf_count[MCAST_EXCLUDE])
-                               continue;
+                               break;
                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
                                scount++;
                                break;
index 3dcdb81ec3e8abdb934627cd243aed0e5ce3b5a3..7cb236e8e261e0620070273fabaa2a1673b043a0 100644 (file)
@@ -15,6 +15,7 @@
 /*
  *     Changes:
  *
+ *     Alexey I. Froloff               :       RFC6106 (DNSSL) support
  *     Pierre Ynard                    :       export userland ND options
  *                                             through netlink (RDNSS support)
  *     Lars Fenneberg                  :       fixed MTU setting on receipt
@@ -228,7 +229,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
 
 static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
 {
-       return opt->nd_opt_type == ND_OPT_RDNSS;
+       return opt->nd_opt_type == ND_OPT_RDNSS ||
+               opt->nd_opt_type == ND_OPT_DNSSL;
 }
 
 static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -1099,8 +1101,9 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
 
        memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
 
-       NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
-               &ipv6_hdr(ra)->saddr);
+       if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
+                   &ipv6_hdr(ra)->saddr))
+               goto nla_put_failure;
        nlmsg_end(skb, nlh);
 
        rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
index 4111050a9fc524e0cd225aa81d5dfa4c4e166d06..fe925e492520c5d0ec8226618bfd84d828ebab08 100644 (file)
@@ -278,10 +278,11 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
                                const struct nf_conntrack_tuple *tuple)
 {
-       NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
-               &tuple->src.u3.ip6);
-       NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
-               &tuple->dst.u3.ip6);
+       if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
+                   &tuple->src.u3.ip6) ||
+           nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
+                   &tuple->dst.u3.ip6))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 92cc9f2931ae46367a7e5fad02e3f31d7978069c..3e81904fbbcdf09b13c2b830bd40f0b2b7ed313a 100644 (file)
@@ -234,10 +234,10 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
 static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
                                  const struct nf_conntrack_tuple *t)
 {
-       NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id);
-       NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type);
-       NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code);
-
+       if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
+           nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
+           nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -300,8 +300,8 @@ icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeout = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
-
+       if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 3992e26a603987cf8bba458dd4f687af5c900a2f..8c5df6f3a2decc2f5d821c713bbfdd9f514a5011 100644 (file)
@@ -2413,7 +2413,8 @@ static int rt6_fill_node(struct net *net,
        else
                table = RT6_TABLE_UNSPEC;
        rtm->rtm_table = table;
-       NLA_PUT_U32(skb, RTA_TABLE, table);
+       if (nla_put_u32(skb, RTA_TABLE, table))
+               goto nla_put_failure;
        if (rt->rt6i_flags & RTF_REJECT)
                rtm->rtm_type = RTN_UNREACHABLE;
        else if (rt->rt6i_flags & RTF_LOCAL)
@@ -2436,16 +2437,20 @@ static int rt6_fill_node(struct net *net,
                rtm->rtm_flags |= RTM_F_CLONED;
 
        if (dst) {
-               NLA_PUT(skb, RTA_DST, 16, dst);
+               if (nla_put(skb, RTA_DST, 16, dst))
+                       goto nla_put_failure;
                rtm->rtm_dst_len = 128;
        } else if (rtm->rtm_dst_len)
-               NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
+               if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
+                       goto nla_put_failure;
 #ifdef CONFIG_IPV6_SUBTREES
        if (src) {
-               NLA_PUT(skb, RTA_SRC, 16, src);
+               if (nla_put(skb, RTA_SRC, 16, src))
+                       goto nla_put_failure;
                rtm->rtm_src_len = 128;
-       } else if (rtm->rtm_src_len)
-               NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
+       } else if (rtm->rtm_src_len &&
+                  nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
+               goto nla_put_failure;
 #endif
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
@@ -2463,17 +2468,20 @@ static int rt6_fill_node(struct net *net,
                        }
                } else
 #endif
-                       NLA_PUT_U32(skb, RTA_IIF, iif);
+                       if (nla_put_u32(skb, RTA_IIF, iif))
+                               goto nla_put_failure;
        } else if (dst) {
                struct in6_addr saddr_buf;
-               if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
-                       NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+               if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
+                   nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+                       goto nla_put_failure;
        }
 
        if (rt->rt6i_prefsrc.plen) {
                struct in6_addr saddr_buf;
                saddr_buf = rt->rt6i_prefsrc.addr;
-               NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+               if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+                       goto nla_put_failure;
        }
 
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
@@ -2489,11 +2497,11 @@ static int rt6_fill_node(struct net *net,
        }
        rcu_read_unlock();
 
-       if (rt->dst.dev)
-               NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-
-       NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
-
+       if (rt->dst.dev &&
+           nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+               goto nla_put_failure;
+       if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
+               goto nla_put_failure;
        if (!(rt->rt6i_flags & RTF_EXPIRES))
                expires = 0;
        else if (rt->dst.expires - jiffies < INT_MAX)
@@ -2598,6 +2606,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb) {
+               dst_release(&rt->dst);
                err = -ENOBUFS;
                goto errout;
        }
index c4ffd174352895630c878729cb7ce001ae9c9bf3..f9608db9dcfbf2d39cbdf27c9b293289a399673b 100644 (file)
@@ -115,7 +115,7 @@ static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
 /*
  * Must be invoked with rcu_read_lock
  */
-static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
                struct net_device *dev, __be32 remote, __be32 local)
 {
        unsigned int h0 = HASH(remote);
@@ -691,7 +691,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        goto tx_error;
                }
 
-               addr6 = (const struct in6_addr*)&neigh->primary_key;
+               addr6 = (const struct in6_addr *)&neigh->primary_key;
                addr_type = ipv6_addr_type(addr6);
 
                if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -721,7 +721,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        goto tx_error;
                }
 
-               addr6 = (const struct in6_addr*)&neigh->primary_key;
+               addr6 = (const struct in6_addr *)&neigh->primary_key;
                addr_type = ipv6_addr_type(addr6);
 
                if (addr_type == IPV6_ADDR_ANY) {
index 93a41a09458bf0a1b667005b6fabd1ade8a57073..bc8c3348f835b1321f18847cc7f9c4f6f73d91be 100644 (file)
@@ -231,24 +231,28 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);
 
-       NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
-       NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-       NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
-       NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
-       NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
+       if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
+           nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+           nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+           nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
+           nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
+               goto nla_put_failure;
 
        nest = nla_nest_start(skb, L2TP_ATTR_STATS);
        if (nest == NULL)
                goto nla_put_failure;
 
-       NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
-       NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
-       NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
+       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+                       tunnel->stats.rx_seq_discards) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+                       tunnel->stats.rx_oos_packets) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors))
+               goto nla_put_failure;
        nla_nest_end(skb, nest);
 
        sk = tunnel->sock;
@@ -259,13 +263,16 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
 
        switch (tunnel->encap) {
        case L2TP_ENCAPTYPE_UDP:
-               NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
-               NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
-               NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
+               if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+                   nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
+                   nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
+                              (sk->sk_no_check != UDP_CSUM_NOXMIT)))
+                       goto nla_put_failure;
                /* NOBREAK */
        case L2TP_ENCAPTYPE_IP:
-               NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
-               NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
+               if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
+                   nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+                       goto nla_put_failure;
                break;
        }
 
@@ -563,43 +570,50 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);
 
-       NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-       NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
-       NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
-       NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
-       NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
-       NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
-       NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
-       if (session->mru)
-               NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
-
-       if (session->ifname && session->ifname[0])
-               NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
-       if (session->cookie_len)
-               NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
-       if (session->peer_cookie_len)
-               NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
-       NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
-       NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
-       NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
+       if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+           nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
+           nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+           nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
+                       session->peer_session_id) ||
+           nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
+           nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
+           nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
+           (session->mru &&
+            nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
+               goto nla_put_failure;
+
+       if ((session->ifname && session->ifname[0] &&
+            nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+           (session->cookie_len &&
+            nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+                    &session->cookie[0])) ||
+           (session->peer_cookie_len &&
+            nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
+                    &session->peer_cookie[0])) ||
+           nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
+           nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
+           nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
 #ifdef CONFIG_XFRM
-       if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
-               NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
+           (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
+            nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
 #endif
-       if (session->reorder_timeout)
-               NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
-
+           (session->reorder_timeout &&
+            nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
+               goto nla_put_failure;
        nest = nla_nest_start(skb, L2TP_ATTR_STATS);
        if (nest == NULL)
                goto nla_put_failure;
-       NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
-       NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
-       NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
-       NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
+       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+                       session->stats.rx_seq_discards) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+                       session->stats.rx_oos_packets) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors))
+               goto nla_put_failure;
        nla_nest_end(skb, nest);
 
        return genlmsg_end(skb, hdr);
index a72a4dff0031b72ac26412fe953d3f90d698a790..7e1b061aeeba4c14cb45785b2975b353ab18a314 100644 (file)
@@ -109,8 +109,9 @@ bitmap_ip_list(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-                               htonl(map->first_ip + id * map->hosts));
+               if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+                                   htonl(map->first_ip + id * map->hosts)))
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, atd);
@@ -194,10 +195,11 @@ bitmap_ip_tlist(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-                               htonl(map->first_ip + id * map->hosts));
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                             htonl(ip_set_timeout_get(members[id])));
+               if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+                                   htonl(map->first_ip + id * map->hosts)) ||
+                   nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                                 htonl(ip_set_timeout_get(members[id]))))
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, adt);
@@ -334,15 +336,16 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-       if (map->netmask != 32)
-               NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-                     htonl(sizeof(*map) + map->memsize));
-       if (with_timeout(map->timeout))
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+           (map->netmask != 32 &&
+            nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
+           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+                         htonl(sizeof(*map) + map->memsize)) ||
+           (with_timeout(map->timeout) &&
+            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+               goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
        return 0;
index 81324c12c5bec09a288cc39a60ea1c920da2ec62..0bb16c469a89ee49248b0827bcf7e9331542eccf 100644 (file)
@@ -186,11 +186,12 @@ bitmap_ipmac_list(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-                               htonl(map->first_ip + id));
-               if (elem->match == MAC_FILLED)
-                       NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
-                               elem->ether);
+               if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+                                   htonl(map->first_ip + id)) ||
+                   (elem->match == MAC_FILLED &&
+                    nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+                            elem->ether)))
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, atd);
@@ -314,14 +315,16 @@ bitmap_ipmac_tlist(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-                               htonl(map->first_ip + id));
-               if (elem->match == MAC_FILLED)
-                       NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
-                               elem->ether);
+               if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+                                   htonl(map->first_ip + id)) ||
+                   (elem->match == MAC_FILLED &&
+                    nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+                            elem->ether)))
+                   goto nla_put_failure;
                timeout = elem->match == MAC_UNSET ? elem->timeout
                                : ip_set_timeout_get(elem->timeout);
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+               if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
+                   goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, atd);
@@ -438,14 +441,16 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-                     htonl(sizeof(*map)
-                           + (map->last_ip - map->first_ip + 1) * map->dsize));
-       if (with_timeout(map->timeout))
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+                         htonl(sizeof(*map) +
+                               ((map->last_ip - map->first_ip + 1) *
+                                map->dsize))) ||
+           (with_timeout(map->timeout) &&
+            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+               goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
        return 0;
index 382ec28ba72efc1fd420eeb38153825593375ba8..b9f1fce7053b29d9fb761d634a581d2d8b8ef1d2 100644 (file)
@@ -96,8 +96,9 @@ bitmap_port_list(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
-                             htons(map->first_port + id));
+               if (nla_put_net16(skb, IPSET_ATTR_PORT,
+                                 htons(map->first_port + id)))
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, atd);
@@ -183,10 +184,11 @@ bitmap_port_tlist(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
-                             htons(map->first_port + id));
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                             htonl(ip_set_timeout_get(members[id])));
+               if (nla_put_net16(skb, IPSET_ATTR_PORT,
+                                 htons(map->first_port + id)) ||
+                   nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                                 htonl(ip_set_timeout_get(members[id]))))
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, adt);
@@ -320,13 +322,14 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-                     htonl(sizeof(*map) + map->memsize));
-       if (with_timeout(map->timeout))
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
+           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+                         htonl(sizeof(*map) + map->memsize)) ||
+           (with_timeout(map->timeout) &&
+            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+               goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
        return 0;
index e6c1c9605a58804fe514406b4a272d7ba1badfd2..eb66b9790a6ffd4918253be858b4a362e530f9aa 100644 (file)
@@ -1092,19 +1092,21 @@ dump_last:
                        ret = -EMSGSIZE;
                        goto release_refcount;
                }
-               NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-               NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+               if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+                   nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
+                       goto nla_put_failure;
                if (dump_flags & IPSET_FLAG_LIST_SETNAME)
                        goto next_set;
                switch (cb->args[2]) {
                case 0:
                        /* Core header data */
-                       NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
-                                      set->type->name);
-                       NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
-                                  set->family);
-                       NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
-                                  set->revision);
+                       if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
+                                          set->type->name) ||
+                           nla_put_u8(skb, IPSET_ATTR_FAMILY,
+                                      set->family) ||
+                           nla_put_u8(skb, IPSET_ATTR_REVISION,
+                                      set->revision))
+                               goto nla_put_failure;
                        ret = set->variant->head(set, skb);
                        if (ret < 0)
                                goto release_refcount;
@@ -1410,11 +1412,12 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
                         IPSET_CMD_HEADER);
        if (!nlh2)
                goto nlmsg_failure;
-       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-       NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
-       NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
-       NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
-       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision);
+       if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+           nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
+           nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
+           nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
+           nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
+               goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
        ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1469,11 +1472,12 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
                         IPSET_CMD_TYPE);
        if (!nlh2)
                goto nlmsg_failure;
-       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-       NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
-       NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
-       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
-       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+       if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+           nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
+           nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
+           nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
+           nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
+               goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
        pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
@@ -1517,7 +1521,8 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
                         IPSET_CMD_PROTOCOL);
        if (!nlh2)
                goto nlmsg_failure;
-       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
+               goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
        ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
index 5139dea6019e44ba24ee01d9c4f65beb3eddfe61..507fe93794aac6ea4ae1734e35c4bc90cbaa94c2 100644 (file)
@@ -81,7 +81,8 @@ hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
 static inline bool
 hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
 {
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -94,9 +95,10 @@ hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
        const struct hash_ip4_telem *tdata =
                (const struct hash_ip4_telem *)data;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))))
+               goto nla_put_failure;
 
        return 0;
 
@@ -262,7 +264,8 @@ ip6_netmask(union nf_inet_addr *ip, u8 prefix)
 static bool
 hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
 {
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -275,9 +278,10 @@ hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
        const struct hash_ip6_telem *e =
                (const struct hash_ip6_telem *)data;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
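
The IPv6 variants also change the argument from &data->ip to &data->ip.in6: the hash elements store a union nf_inet_addr, and the ipset helper nla_put_ipaddr6() used above takes a pointer to struct in6_addr, so the in6 member of the union is passed explicitly. A rough sketch, assuming the helper keeps the (skb, attribute type, const struct in6_addr *) signature seen in these hunks and using a made-up element type:

    #include <linux/netfilter.h>                /* union nf_inet_addr */
    #include <linux/netfilter/ipset/ip_set.h>   /* IPSET_ATTR_IP, nla_put_ipaddr6() */

    struct my_elem {                            /* made-up element, for illustration */
            union nf_inet_addr ip;              /* .ip is __be32, .in6 is struct in6_addr */
    };

    static int my_dump_v6(struct sk_buff *skb, const struct my_elem *e)
    {
            /* Pass the in6 member, not the union, to match the helper's type. */
            if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }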
index 9c27e249c1713bb14d9d03c730bd2a5a125c6177..68f284c97490d51461a06b990cc8c3d08ebbe96e 100644 (file)
@@ -93,9 +93,10 @@ static bool
 hash_ipport4_data_list(struct sk_buff *skb,
                       const struct hash_ipport4_elem *data)
 {
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -109,12 +110,12 @@ hash_ipport4_data_tlist(struct sk_buff *skb,
        const struct hash_ipport4_telem *tdata =
                (const struct hash_ipport4_telem *)data;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
-
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -308,9 +309,10 @@ static bool
 hash_ipport6_data_list(struct sk_buff *skb,
                       const struct hash_ipport6_elem *data)
 {
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -324,11 +326,12 @@ hash_ipport6_data_tlist(struct sk_buff *skb,
        const struct hash_ipport6_telem *e =
                (const struct hash_ipport6_telem *)data;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 9134057c07284cf41b1e2d119415e8c043a0fc9a..1eec4b9e0dca93698ada6cfa610b0c1cfcadaebb 100644 (file)
@@ -94,10 +94,11 @@ static bool
 hash_ipportip4_data_list(struct sk_buff *skb,
                       const struct hash_ipportip4_elem *data)
 {
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -111,13 +112,13 @@ hash_ipportip4_data_tlist(struct sk_buff *skb,
        const struct hash_ipportip4_telem *tdata =
                (const struct hash_ipportip4_telem *)data;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
-
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -319,10 +320,11 @@ static bool
 hash_ipportip6_data_list(struct sk_buff *skb,
                         const struct hash_ipportip6_elem *data)
 {
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -336,12 +338,13 @@ hash_ipportip6_data_tlist(struct sk_buff *skb,
        const struct hash_ipportip6_telem *e =
                (const struct hash_ipportip6_telem *)data;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 5d05e69698626570ca8247732d4bb8f8af632674..62d66ecef369376d833f386b3b0208ed0aef2147 100644 (file)
@@ -124,13 +124,14 @@ hash_ipportnet4_data_list(struct sk_buff *skb,
 {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -145,16 +146,16 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
                (const struct hash_ipportnet4_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -436,13 +437,14 @@ hash_ipportnet6_data_list(struct sk_buff *skb,
 {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -457,15 +459,16 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
                (const struct hash_ipportnet6_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
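
Optional attributes keep their semantics through short-circuit evaluation: the old two-line form, if (flags) NLA_PUT_NET32(...), becomes one more term in the chain, (flags && nla_put_net32(...)), which only emits the attribute when flags is non-zero but still reaches nla_put_failure if the put itself fails. Schematically, again with invented MY_ATTR_* numbers:

    #include <net/netlink.h>

    enum { MY_ATTR_CIDR = 1, MY_ATTR_FLAGS };

    static int my_dump_net(struct sk_buff *skb, u8 cidr, u32 flags)
    {
            /* The && keeps the attribute optional; the || still routes any
             * -EMSGSIZE from either put to the error label. */
            if (nla_put_u8(skb, MY_ATTR_CIDR, cidr) ||
                (flags &&
                 nla_put_net32(skb, MY_ATTR_FLAGS, htonl(flags))))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }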
index 7c3d945517cfa55c62faeccb24d6bce29c958faa..6607a814be5791511544d5d743ba39b37c0aee15 100644 (file)
@@ -111,10 +111,11 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
 {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -128,13 +129,13 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
                (const struct hash_net4_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -339,10 +340,11 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
 {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -356,12 +358,13 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
                (const struct hash_net6_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index f24037ff432201015e81731f61133d45079128d6..6093f3daa91112ba0d161a0b5661a3c38013107f 100644 (file)
@@ -252,11 +252,12 @@ hash_netiface4_data_list(struct sk_buff *skb,
 
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-       NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+           nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -273,13 +274,14 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
 
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-       NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+           nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))))
+               goto nla_put_failure;
 
        return 0;
 
@@ -555,11 +557,12 @@ hash_netiface6_data_list(struct sk_buff *skb,
 
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-       NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+           nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -576,13 +579,14 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
 
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-       NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+           nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index ce2e77100b64ecb521db1cf45c0babd4902a374f..ae3c644adc141e95b3a25945a45ef67983d1718a 100644 (file)
@@ -124,12 +124,13 @@ hash_netport4_data_list(struct sk_buff *skb,
 {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -144,15 +145,15 @@ hash_netport4_data_tlist(struct sk_buff *skb,
                (const struct hash_netport4_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(tdata->timeout)));
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(tdata->timeout))) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -402,12 +403,13 @@ hash_netport6_data_list(struct sk_buff *skb,
 {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -422,14 +424,15 @@ hash_netport6_data_tlist(struct sk_buff *skb,
                (const struct hash_netport6_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                     htonl(ip_set_timeout_get(e->timeout)));
-       if (flags)
-               NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                         htonl(ip_set_timeout_get(e->timeout))) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 7e095f9005f01f301cefe757533981e787b82ad5..6cb1225765f952667b0f4c763e635461fcb94ad0 100644 (file)
@@ -402,12 +402,13 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
-       NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
-       if (with_timeout(map->timeout))
-               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-                     htonl(sizeof(*map) + map->size * map->dsize));
+       if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
+           (with_timeout(map->timeout) &&
+            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
+           nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+                         htonl(sizeof(*map) + map->size * map->dsize)))
+               goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
        return 0;
@@ -442,13 +443,15 @@ list_set_list(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
-                              ip_set_name_byindex(e->id));
+               if (nla_put_string(skb, IPSET_ATTR_NAME,
+                                  ip_set_name_byindex(e->id)))
+                       goto nla_put_failure;
                if (with_timeout(map->timeout)) {
                        const struct set_telem *te =
                                (const struct set_telem *) e;
-                       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-                                     htonl(ip_set_timeout_get(te->timeout)));
+                       __be32 to = htonl(ip_set_timeout_get(te->timeout));
+                       if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to))
+                               goto nla_put_failure;
                }
                ipset_nest_end(skb, nested);
        }
index b3afe189af61880464ef6562c568016509957293..964d426d237ffe22edaf9dfce7503a29b8fed679 100644 (file)
@@ -2816,17 +2816,17 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
 
        ip_vs_copy_stats(&ustats, stats);
 
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts);
-       NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes);
-       NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps);
-
+       if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
+           nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
+           nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+               goto nla_put_failure;
        nla_nest_end(skb, nl_stats);
 
        return 0;
@@ -2847,23 +2847,25 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
        if (!nl_service)
                return -EMSGSIZE;
 
-       NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
-
+       if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
+               goto nla_put_failure;
        if (svc->fwmark) {
-               NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
+               if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
+                       goto nla_put_failure;
        } else {
-               NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
-               NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
-               NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
+               if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
+                   nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
+                   nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
+                       goto nla_put_failure;
        }
 
-       NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
-       if (svc->pe)
-               NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name);
-       NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
-       NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
-       NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
-
+       if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
+           (svc->pe &&
+            nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
+           nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
+           nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
+           nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
+               goto nla_put_failure;
        if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
                goto nla_put_failure;
 
@@ -3038,21 +3040,22 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
        if (!nl_dest)
                return -EMSGSIZE;
 
-       NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
-       NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
-
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
-                   atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
-                   atomic_read(&dest->activeconns));
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
-                   atomic_read(&dest->inactconns));
-       NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
-                   atomic_read(&dest->persistconns));
-
+       if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
+           nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+                       (atomic_read(&dest->conn_flags) &
+                        IP_VS_CONN_F_FWD_MASK)) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
+                       atomic_read(&dest->weight)) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
+                       atomic_read(&dest->activeconns)) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
+                       atomic_read(&dest->inactconns)) ||
+           nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
+                       atomic_read(&dest->persistconns)))
+               goto nla_put_failure;
        if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
                goto nla_put_failure;
 
@@ -3181,10 +3184,10 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
        if (!nl_daemon)
                return -EMSGSIZE;
 
-       NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
-       NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
-       NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
-
+       if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
+           nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
+           nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+               goto nla_put_failure;
        nla_nest_end(skb, nl_daemon);
 
        return 0;
@@ -3473,21 +3476,26 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
 
                __ip_vs_get_timeouts(net, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
-               NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
-               NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
-                           t.tcp_fin_timeout);
+               if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
+                               t.tcp_timeout) ||
+                   nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
+                               t.tcp_fin_timeout))
+                       goto nla_put_failure;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-               NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
+               if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
+                       goto nla_put_failure;
 #endif
 
                break;
        }
 
        case IPVS_CMD_GET_INFO:
-               NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
-               NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
-                           ip_vs_conn_tab_size);
+               if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
+                               IP_VS_VERSION_CODE) ||
+                   nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
+                               ip_vs_conn_tab_size))
+                       goto nla_put_failure;
                break;
        }
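
The IPVS fill helpers wrap their attributes in a nested container (nla_nest_start()/nla_nest_end(), visible as context above); the nla_put_failure labels themselves fall outside these hunks, but the usual shape of such a function, with the nest rolled back via nla_nest_cancel() on error, is roughly the following (container and attribute numbers invented):

    #include <net/netlink.h>

    enum { MY_CONTAINER = 1, MY_ATTR_CONNS };

    static int my_fill_nested(struct sk_buff *skb, u32 conns)
    {
            struct nlattr *nest = nla_nest_start(skb, MY_CONTAINER);

            if (!nest)
                    return -EMSGSIZE;
            if (nla_put_u32(skb, MY_ATTR_CONNS, conns))
                    goto nla_put_failure;
            nla_nest_end(skb, nest);
            return 0;

    nla_put_failure:
            /* Discard the partially written container before reporting. */
            nla_nest_cancel(skb, nest);
            return -EMSGSIZE;
    }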
 
index 729f157a0efa690cf877dbd9dbb94dd1b09f1be6..cf0747c5741f8b687d8d8b8572cf8462fc488e34 100644 (file)
@@ -1152,8 +1152,9 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
 {
-       NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
-       NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
+       if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+           nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index ca7e8354e4f89d3fd3d6d641e4dd5a0738cadb05..462ec2dbe5618d2b315bcb0978594824e571dccb 100644 (file)
@@ -66,7 +66,8 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,
        nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
-       NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);
+       if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
+               goto nla_put_failure;
 
        if (likely(l4proto->tuple_to_nlattr))
                ret = l4proto->tuple_to_nlattr(skb, tuple);
@@ -126,7 +127,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
 static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
+       if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -141,7 +143,8 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
        if (timeout < 0)
                timeout = 0;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
+       if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -190,7 +193,8 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
        nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
        if (!nest_helper)
                goto nla_put_failure;
-       NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);
+       if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
+               goto nla_put_failure;
 
        if (helper->to_nlattr)
                helper->to_nlattr(skb, ct);
@@ -214,8 +218,9 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
        if (!nest_count)
                goto nla_put_failure;
 
-       NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
-       NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
+       if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
+           nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest_count);
 
@@ -260,11 +265,10 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
        if (!nest_count)
                goto nla_put_failure;
 
-       NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
-       if (tstamp->stop != 0) {
-               NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
-                            cpu_to_be64(tstamp->stop));
-       }
+       if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
+           (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
+                                              cpu_to_be64(tstamp->stop))))
+               goto nla_put_failure;
        nla_nest_end(skb, nest_count);
 
        return 0;
@@ -277,7 +281,8 @@ nla_put_failure:
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
+       if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -304,7 +309,8 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
        if (!nest_secctx)
                goto nla_put_failure;
 
-       NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
+       if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
+               goto nla_put_failure;
        nla_nest_end(skb, nest_secctx);
 
        ret = 0;
@@ -349,12 +355,13 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
        if (!nest_parms)
                goto nla_put_failure;
 
-       NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
-                    htonl(natseq->correction_pos));
-       NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
-                    htonl(natseq->offset_before));
-       NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
-                    htonl(natseq->offset_after));
+       if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
+                        htonl(natseq->correction_pos)) ||
+           nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
+                        htonl(natseq->offset_before)) ||
+           nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
+                        htonl(natseq->offset_after)))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest_parms);
 
@@ -390,7 +397,8 @@ ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
 static inline int
 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
+       if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -400,7 +408,8 @@ nla_put_failure:
 static inline int
 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
+       if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -440,8 +449,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
                goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct))
-               NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+       if (nf_ct_zone(ct) &&
+           nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+               goto nla_put_failure;
 
        if (ctnetlink_dump_status(skb, ct) < 0 ||
            ctnetlink_dump_timeout(skb, ct) < 0 ||
@@ -617,8 +627,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
                goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct))
-               NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+       if (nf_ct_zone(ct) &&
+           nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+               goto nla_put_failure;
 
        if (ctnetlink_dump_id(skb, ct) < 0)
                goto nla_put_failure;
@@ -1705,7 +1716,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                if (!nest_parms)
                        goto nla_put_failure;
 
-               NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
+               if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
+                       goto nla_put_failure;
 
                nat_tuple.src.l3num = nf_ct_l3num(master);
                nat_tuple.src.u3.ip = exp->saved_ip;
@@ -1718,21 +1730,24 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                nla_nest_end(skb, nest_parms);
        }
 #endif
-       NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
-       NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
-       NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
-       NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
+       if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
+           nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+           nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
+           nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
+               goto nla_put_failure;
        help = nfct_help(master);
        if (help) {
                struct nf_conntrack_helper *helper;
 
                helper = rcu_dereference(help->helper);
-               if (helper)
-                       NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
+               if (helper &&
+                   nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
+                       goto nla_put_failure;
        }
        expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
-       if (expfn != NULL)
-               NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
+       if (expfn != NULL &&
+           nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
+               goto nla_put_failure;
 
        return 0;
 
index 24fdce256cb0a65ee9996dfa7fa2569e499eebd0..a58998d0912fe556e123e2d4c1510638290b38f0 100644 (file)
@@ -643,11 +643,12 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
        nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
-       NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
-       NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
-                  ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
-       NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
-                    cpu_to_be64(ct->proto.dccp.handshake_seq));
+       if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
+           nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
+                      ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
+           nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+                        cpu_to_be64(ct->proto.dccp.handshake_seq)))
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
        spin_unlock_bh(&ct->lock);
        return 0;
@@ -739,9 +740,10 @@ dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
         const unsigned int *timeouts = data;
        int i;
 
-       for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
-               NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+       for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
+               if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+                       goto nla_put_failure;
+       }
        return 0;
 
 nla_put_failure:
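
In the DCCP and SCTP timeout dumps the put sits inside a for loop, so each iteration can now fail individually; the loop body becomes an if/goto and gains braces in kernel style. A sketch with an assumed per-state timeouts[] array indexed by attribute number, as in the hunks:

    #include <linux/kernel.h>
    #include <net/netlink.h>

    static int my_dump_timeouts(struct sk_buff *skb, const unsigned int *timeouts,
                                int first, int last)
    {
            int i;

            /* One attribute per protocol state; stop at the first one that
             * does not fit into the message. */
            for (i = first; i <= last; i++) {
                    if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
                            goto nla_put_failure;
            }
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }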
index 835e24c58f0de3ab67977aad017d456a840e28ce..d8923d54b3585bf579c66eaba82dc4ec7d464236 100644 (file)
@@ -90,7 +90,8 @@ generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeout = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
+       if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
+               goto nla_put_failure;
 
        return 0;
 
index 659648c4b14ad50331c996ab6a401e379b23b700..4bf6b4e4b7763197c5698db8993e5362c5ef0911 100644 (file)
@@ -321,10 +321,11 @@ gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeouts = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
-                       htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
-                       htonl(timeouts[GRE_CT_REPLIED] / HZ));
+       if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
+                        htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
+                        htonl(timeouts[GRE_CT_REPLIED] / HZ)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index 72b5088592dc8557b264c4b0f7de4e72fa3e3b76..996db2fa21f7621cacf70fea4b371fafb8c1bae6 100644 (file)
@@ -482,15 +482,12 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
        if (!nest_parms)
                goto nla_put_failure;
 
-       NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
-
-       NLA_PUT_BE32(skb,
-                    CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
-                    ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]);
-
-       NLA_PUT_BE32(skb,
-                    CTA_PROTOINFO_SCTP_VTAG_REPLY,
-                    ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
+       if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
+           nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
+                        ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
+           nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
+                        ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
+               goto nla_put_failure;
 
        spin_unlock_bh(&ct->lock);
 
@@ -578,9 +575,10 @@ sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
         const unsigned int *timeouts = data;
        int i;
 
-       for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
-               NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+       for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
+               if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+                       goto nla_put_failure;
+       }
         return 0;
 
 nla_put_failure:
index 0d07a1dcf60504758aace258dd0347f06f305797..4dfbfa840f8a57668bea85ec1d7717ca79a0cb64 100644 (file)
@@ -1147,21 +1147,22 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
        if (!nest_parms)
                goto nla_put_failure;
 
-       NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state);
-
-       NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
-                  ct->proto.tcp.seen[0].td_scale);
-
-       NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
-                  ct->proto.tcp.seen[1].td_scale);
+       if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
+           nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+                      ct->proto.tcp.seen[0].td_scale) ||
+           nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
+                      ct->proto.tcp.seen[1].td_scale))
+               goto nla_put_failure;
 
        tmp.flags = ct->proto.tcp.seen[0].flags;
-       NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
-               sizeof(struct nf_ct_tcp_flags), &tmp);
+       if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+                   sizeof(struct nf_ct_tcp_flags), &tmp))
+               goto nla_put_failure;
 
        tmp.flags = ct->proto.tcp.seen[1].flags;
-       NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
-               sizeof(struct nf_ct_tcp_flags), &tmp);
+       if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
+                   sizeof(struct nf_ct_tcp_flags), &tmp))
+               goto nla_put_failure;
        spin_unlock_bh(&ct->lock);
 
        nla_nest_end(skb, nest_parms);
@@ -1310,28 +1311,29 @@ tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeouts = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
-                       htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
-                       htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
-                       htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
-                       htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
-                       htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
-                       htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
-                       htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
-                       htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
-                       htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
-                       htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
-                       htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
+       if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
+                       htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
+                        htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
+                        htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
+                        htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
+                        htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
+                        htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
+                        htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
+                        htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
+                        htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
+                        htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
+                        htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index a9073dc1548d087fbe694f898ff30072792bc2e6..7259a6bdeb491f4057d5527d1f68ff63a097311b 100644 (file)
@@ -181,10 +181,11 @@ udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeouts = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
-                       htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
-                       htonl(timeouts[UDP_CT_REPLIED] / HZ));
+       if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
+                        htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
+                        htonl(timeouts[UDP_CT_REPLIED] / HZ)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index e0606392cda053a9d1345d465d503e5e6ab20d42..4d60a5376aa6d418d6cf10f889e6a12f348dd9d2 100644 (file)
@@ -185,10 +185,11 @@ udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
 {
        const unsigned int *timeouts = data;
 
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
-                       htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
-                       htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
+       if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
+                        htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
+           nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
+                        htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
index d98c868c148b6cff5f05f555a20547c76ca19f91..b2e7310ca0b8e05d9835c4898f9670993d5ebed3 100644 (file)
@@ -109,7 +109,8 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = 0;
 
-       NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+       if (nla_put_string(skb, NFACCT_NAME, acct->name))
+               goto nla_put_failure;
 
        if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
                pkts = atomic64_xchg(&acct->pkts, 0);
@@ -118,9 +119,10 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
                pkts = atomic64_read(&acct->pkts);
                bytes = atomic64_read(&acct->bytes);
        }
-       NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
-       NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
-       NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+       if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
+           nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
+           nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
+               goto nla_put_failure;
 
        nlmsg_end(skb, nlh);
        return skb->len;
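
nfnl_acct_fill_info() applies the same chain to 64-bit counters: the values are read first (or atomically swapped to zero for the CTRZERO variant) and then emitted with nla_put_be64()/cpu_to_be64(). A reduced sketch with a made-up counter struct standing in for the real accounting object:

    #include <linux/atomic.h>
    #include <net/netlink.h>

    enum { MY_ATTR_PKTS = 1, MY_ATTR_BYTES };

    struct my_acct {                    /* made-up, for illustration only */
            atomic64_t pkts;
            atomic64_t bytes;
    };

    static int my_fill_acct(struct sk_buff *skb, struct my_acct *acct)
    {
            u64 pkts = atomic64_read(&acct->pkts);
            u64 bytes = atomic64_read(&acct->bytes);

            /* 64-bit counters go out in big-endian (network) byte order. */
            if (nla_put_be64(skb, MY_ATTR_PKTS, cpu_to_be64(pkts)) ||
                nla_put_be64(skb, MY_ATTR_BYTES, cpu_to_be64(bytes)))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }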
index 2b9e79f5ef057295faed0fd7ad81de718763809f..3e655288d1d6163b23504154b1e11b15c65937e3 100644 (file)
@@ -170,11 +170,12 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = 0;
 
-       NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
-       NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
-       NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto);
-       NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
-                       htonl(atomic_read(&timeout->refcnt)));
+       if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
+           nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
+           nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
+           nla_put_be32(skb, CTA_TIMEOUT_USE,
+                        htonl(atomic_read(&timeout->refcnt))))
+               goto nla_put_failure;
 
        if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
                struct nlattr *nest_parms;
index 66b2c54c544f6bbf87c20500d97452122f2b2fcf..3c3cfc0cc9b5a7495e89c838a06f9bf520605cf0 100644 (file)
@@ -391,67 +391,78 @@ __build_packet_message(struct nfulnl_instance *inst,
        pmsg.hw_protocol        = skb->protocol;
        pmsg.hook               = hooknum;
 
-       NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
+       if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
+               goto nla_put_failure;
 
-       if (prefix)
-               NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);
+       if (prefix &&
+           nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
+               goto nla_put_failure;
 
        if (indev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-               NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-                            htonl(indev->ifindex));
+               if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+                                htonl(indev->ifindex)))
+                       goto nla_put_failure;
 #else
                if (pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
-                       NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-                                    htonl(indev->ifindex));
+                       if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+                                        htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
-                       NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-                                    htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+                           nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+                                        htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+                               goto nla_put_failure;
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
-                       NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-                                    htonl(indev->ifindex));
-                       if (skb->nf_bridge && skb->nf_bridge->physindev)
-                               NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-                                            htonl(skb->nf_bridge->physindev->ifindex));
+                       if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+                                        htonl(indev->ifindex)))
+                               goto nla_put_failure;
+                       if (skb->nf_bridge && skb->nf_bridge->physindev &&
+                           nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+                                        htonl(skb->nf_bridge->physindev->ifindex)))
+                               goto nla_put_failure;
                }
 #endif
        }
 
        if (outdev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-               NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-                            htonl(outdev->ifindex));
+               if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+                                htonl(outdev->ifindex)))
+                       goto nla_put_failure;
 #else
                if (pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
-                       NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-                                    htonl(outdev->ifindex));
+                       if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+                                        htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
-                       NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-                                    htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+                           nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+                                        htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+                               goto nla_put_failure;
                } else {
                        /* Case 2: indev is a bridge group, we need to look
                         * for physical device (when called from ipv4) */
-                       NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-                                    htonl(outdev->ifindex));
-                       if (skb->nf_bridge && skb->nf_bridge->physoutdev)
-                               NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-                                            htonl(skb->nf_bridge->physoutdev->ifindex));
+                       if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+                                        htonl(outdev->ifindex)))
+                               goto nla_put_failure;
+                       if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
+                           nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+                                        htonl(skb->nf_bridge->physoutdev->ifindex)))
+                               goto nla_put_failure;
                }
 #endif
        }
 
-       if (skb->mark)
-               NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
+       if (skb->mark &&
+           nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
+               goto nla_put_failure;
 
        if (indev && skb->dev &&
            skb->mac_header != skb->network_header) {
@@ -459,16 +470,18 @@ __build_packet_message(struct nfulnl_instance *inst,
                int len = dev_parse_header(skb, phw.hw_addr);
                if (len > 0) {
                        phw.hw_addrlen = htons(len);
-                       NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
+                       if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
+                               goto nla_put_failure;
                }
        }
 
        if (indev && skb_mac_header_was_set(skb)) {
-               NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
-               NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
-                            htons(skb->dev->hard_header_len));
-               NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
-                       skb_mac_header(skb));
+               if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+                   nla_put_be16(inst->skb, NFULA_HWLEN,
+                                htons(skb->dev->hard_header_len)) ||
+                   nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
+                           skb_mac_header(skb)))
+                       goto nla_put_failure;
        }
 
        if (skb->tstamp.tv64) {
@@ -477,7 +490,8 @@ __build_packet_message(struct nfulnl_instance *inst,
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);
 
-               NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
+               if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
+                       goto nla_put_failure;
        }
 
        /* UID */
@@ -487,22 +501,24 @@ __build_packet_message(struct nfulnl_instance *inst,
                        struct file *file = skb->sk->sk_socket->file;
                        __be32 uid = htonl(file->f_cred->fsuid);
                        __be32 gid = htonl(file->f_cred->fsgid);
-                       /* need to unlock here since NLA_PUT may goto */
                        read_unlock_bh(&skb->sk->sk_callback_lock);
-                       NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
-                       NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
+                       if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
+                           nla_put_be32(inst->skb, NFULA_GID, gid))
+                               goto nla_put_failure;
                } else
                        read_unlock_bh(&skb->sk->sk_callback_lock);
        }
 
        /* local sequence number */
-       if (inst->flags & NFULNL_CFG_F_SEQ)
-               NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));
+       if ((inst->flags & NFULNL_CFG_F_SEQ) &&
+           nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
+               goto nla_put_failure;
 
        /* global sequence number */
-       if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
-               NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
-                            htonl(atomic_inc_return(&global_seq)));
+       if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
+           nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
+                        htonl(atomic_inc_return(&global_seq))))
+               goto nla_put_failure;
 
        if (data_len) {
                struct nlattr *nla;
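
Optional attributes that were previously guarded by a bare if (as with NFULA_MARK above) fold the guard and the put into one condition, so the goto is taken only when the attribute is present and the put fails. A sketch with a hypothetical mark attribute:

/* Sketch only -- my_put_mark() and MY_ATTR_MARK are hypothetical. */
static int my_put_mark(struct sk_buff *skb, u32 mark)
{
	/* && short-circuits, so nla_put_be32() is skipped entirely
	 * when the mark is unset. */
	if (mark &&
	    nla_put_be32(skb, MY_ATTR_MARK, htonl(mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
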
index a80b0cb03f17adde8f8a7a5a0ae6279aace2d5d0..8d6bcf32c0ed93ec8903e267e95b8271d98b4b9a 100644 (file)
@@ -288,58 +288,67 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        indev = entry->indev;
        if (indev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-               NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
+               if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
+                       goto nla_put_failure;
 #else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
-                       NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
-                                    htonl(indev->ifindex));
+                       if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+                                        htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
-                       NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-                                    htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+                           nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+                                        htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+                               goto nla_put_failure;
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
-                       NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-                                    htonl(indev->ifindex));
-                       if (entskb->nf_bridge && entskb->nf_bridge->physindev)
-                               NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
-                                            htonl(entskb->nf_bridge->physindev->ifindex));
+                       if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+                                        htonl(indev->ifindex)))
+                               goto nla_put_failure;
+                       if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
+                           nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+                                        htonl(entskb->nf_bridge->physindev->ifindex)))
+                               goto nla_put_failure;
                }
 #endif
        }
 
        if (outdev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-               NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
+               if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
+                       goto nla_put_failure;
 #else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
-                       NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-                                    htonl(outdev->ifindex));
+                       if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+                                        htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
-                       NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-                                    htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+                           nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+                                        htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+                               goto nla_put_failure;
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
-                       NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-                                    htonl(outdev->ifindex));
-                       if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
-                               NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-                                            htonl(entskb->nf_bridge->physoutdev->ifindex));
+                       if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+                                        htonl(outdev->ifindex)))
+                               goto nla_put_failure;
+                       if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
+                           nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+                                        htonl(entskb->nf_bridge->physoutdev->ifindex)))
+                               goto nla_put_failure;
                }
 #endif
        }
 
-       if (entskb->mark)
-               NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
+       if (entskb->mark &&
+           nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
+               goto nla_put_failure;
 
        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
@@ -347,7 +356,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
-                       NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
+                       if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
+                               goto nla_put_failure;
                }
        }
 
@@ -357,7 +367,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);
 
-               NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
+               if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
+                       goto nla_put_failure;
        }
 
        if (data_len) {
index 9f40441d7a7d1771d140bff1382bcba319d57759..8340ace837f2eb309a707d84a8d71da0fa282bbd 100644 (file)
@@ -635,11 +635,12 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
        if (hdr == NULL)
                return -1;
 
-       NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
-       NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
-       NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version);
-       NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize);
-       NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr);
+       if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
+           nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
+           nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
+           nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
+           nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
+               goto nla_put_failure;
 
        if (!list_empty(&family->ops_list)) {
                struct nlattr *nla_ops;
@@ -657,8 +658,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
                        if (nest == NULL)
                                goto nla_put_failure;
 
-                       NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
-                       NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
+                       if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
+                           nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
+                               goto nla_put_failure;
 
                        nla_nest_end(skb, nest);
                }
@@ -682,9 +684,10 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
                        if (nest == NULL)
                                goto nla_put_failure;
 
-                       NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
-                       NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
-                                      grp->name);
+                       if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+                           nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+                                          grp->name))
+                               goto nla_put_failure;
 
                        nla_nest_end(skb, nest);
                }
@@ -710,8 +713,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
        if (hdr == NULL)
                return -1;
 
-       NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
-       NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
+       if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
+           nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
+               goto nla_put_failure;
 
        nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
        if (nla_grps == NULL)
@@ -721,9 +725,10 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
        if (nest == NULL)
                goto nla_put_failure;
 
-       NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
-       NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
-                      grp->name);
+       if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+           nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+                          grp->name))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest);
        nla_nest_end(skb, nla_grps);
index 6404052d6c070ccf8f54c4827408d5f9291d6b67..8937664674fae623f4ec8e27fad62e5d2e8c9ec4 100644 (file)
@@ -63,19 +63,23 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
 
        genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
 
-       NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
-       NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
-       NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
-       NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
-       if (target->nfcid1_len > 0)
-               NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
-                       target->nfcid1);
-       if (target->sensb_res_len > 0)
-               NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
-                       target->sensb_res);
-       if (target->sensf_res_len > 0)
-               NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
-                       target->sensf_res);
+       if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
+           nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
+           nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) ||
+           nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res))
+               goto nla_put_failure;
+       if (target->nfcid1_len > 0 &&
+           nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
+                   target->nfcid1))
+               goto nla_put_failure;
+       if (target->sensb_res_len > 0 &&
+           nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
+                   target->sensb_res))
+               goto nla_put_failure;
+       if (target->sensf_res_len > 0 &&
+           nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
+                   target->sensf_res))
+               goto nla_put_failure;
 
        return genlmsg_end(msg, hdr);
 
@@ -170,7 +174,8 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
        if (!hdr)
                goto free_msg;
 
-       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -197,10 +202,11 @@ int nfc_genl_device_added(struct nfc_dev *dev)
        if (!hdr)
                goto free_msg;
 
-       NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-       NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
-       NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+       if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+           nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+           nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+           nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -229,7 +235,8 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
        if (!hdr)
                goto free_msg;
 
-       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -259,10 +266,11 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
        if (cb)
                genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
 
-       NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-       NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
-       NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+       if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+           nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+           nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+           nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+               goto nla_put_failure;
 
        return genlmsg_end(msg, hdr);
 
@@ -339,11 +347,14 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
        if (!hdr)
                goto free_msg;
 
-       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-       if (rf_mode == NFC_RF_INITIATOR)
-               NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
-       NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
-       NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+               goto nla_put_failure;
+       if (rf_mode == NFC_RF_INITIATOR &&
+           nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+               goto nla_put_failure;
+       if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) ||
+           nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -376,7 +387,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
        if (!hdr)
                goto free_msg;
 
-       NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
index e44e631ea952d3aadd1770f1e85b69b38b004325..f86de29979ef317351252e7284389624b5541d9e 100644 (file)
@@ -778,15 +778,18 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);
 
-       if (used)
-               NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+       if (used &&
+           nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
+               goto nla_put_failure;
 
-       if (stats.n_packets)
-               NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
-                       sizeof(struct ovs_flow_stats), &stats);
+       if (stats.n_packets &&
+           nla_put(skb, OVS_FLOW_ATTR_STATS,
+                   sizeof(struct ovs_flow_stats), &stats))
+               goto nla_put_failure;
 
-       if (tcp_flags)
-               NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+       if (tcp_flags &&
+           nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+               goto nla_put_failure;
 
        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
@@ -1168,7 +1171,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                goto nla_put_failure;
 
        get_dp_stats(dp, &dp_stats);
-       NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+       if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+               goto nla_put_failure;
 
        return genlmsg_end(skb, ovs_header);
 
@@ -1468,14 +1472,16 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 
        ovs_header->dp_ifindex = get_dpifindex(vport->dp);
 
-       NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
-       NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
-       NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
-       NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+       if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
+           nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
+           nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
+           nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
+               goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
-       NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
-               &vport_stats);
+       if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+                   &vport_stats))
+               goto nla_put_failure;
 
        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
index 1252c3081ef12740a0b818fbd58ae3ab6e9b870e..7cb416381e87e3e108c6776f6d7669bd266693c0 100644 (file)
@@ -1174,11 +1174,13 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla, *encap;
 
-       if (swkey->phy.priority)
-               NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+       if (swkey->phy.priority &&
+           nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+               goto nla_put_failure;
 
-       if (swkey->phy.in_port != USHRT_MAX)
-               NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+       if (swkey->phy.in_port != USHRT_MAX &&
+           nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
+               goto nla_put_failure;
 
        nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
@@ -1188,8 +1190,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
        memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
 
        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
-               NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
-               NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+               if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
+                   nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+                       goto nla_put_failure;
                encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
                if (!swkey->eth.tci)
                        goto unencap;
@@ -1200,7 +1203,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
        if (swkey->eth.type == htons(ETH_P_802_2))
                goto unencap;
 
-       NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+       if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+               goto nla_put_failure;
 
        if (swkey->eth.type == htons(ETH_P_IP)) {
                struct ovs_key_ipv4 *ipv4_key;
index d61f6761777de54b547c5634853a0a3d7630765e..cfdf135fcd69332e9cb497315f9b6beb39097126 100644 (file)
@@ -116,7 +116,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
        ifm->ifa_flags = IFA_F_PERMANENT;
        ifm->ifa_scope = RT_SCOPE_LINK;
        ifm->ifa_index = dev->ifindex;
-       NLA_PUT_U8(skb, IFA_LOCAL, addr);
+       if (nla_put_u8(skb, IFA_LOCAL, addr))
+               goto nla_put_failure;
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -183,8 +184,9 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
-       NLA_PUT_U8(skb, RTA_DST, dst);
-       NLA_PUT_U32(skb, RTA_OIF, dev->ifindex);
+       if (nla_put_u8(skb, RTA_DST, dst) ||
+           nla_put_u32(skb, RTA_OIF, dev->ifindex))
+               goto nla_put_failure;
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
index 93fdf131bd75e3e8e1969f1889c9d2a2cff90c67..5cfb160df0632f4ae206248a5e34cecdda0d707b 100644 (file)
@@ -127,7 +127,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
        nest = nla_nest_start(skb, a->order);
        if (nest == NULL)
                goto nla_put_failure;
-       NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+       if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+               goto nla_put_failure;
        for (i = 0; i < (hinfo->hmask + 1); i++) {
                p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
 
@@ -139,7 +140,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
                        p = s_p;
                }
        }
-       NLA_PUT_U32(skb, TCA_FCNT, n_i);
+       if (nla_put_u32(skb, TCA_FCNT, n_i))
+               goto nla_put_failure;
        nla_nest_end(skb, nest);
 
        return n_i;
@@ -437,7 +439,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        if (a->ops == NULL || a->ops->dump == NULL)
                return err;
 
-       NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+       if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+               goto nla_put_failure;
        if (tcf_action_copy_stats(skb, a, 0))
                goto nla_put_failure;
        nest = nla_nest_start(skb, TCA_OPTIONS);
index 453a73431ac47b8688a207b22d90f647dfc3eef4..882124ceb70c1be6ef1c87105d276a5e84e506ba 100644 (file)
@@ -550,11 +550,13 @@ static int tcf_csum_dump(struct sk_buff *skb,
        };
        struct tcf_t t;
 
-       NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-       NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
+               goto nla_put_failure;
 
        return skb->len;
 
index b77f5a06a658f56bf3733ab72918aa63aedd1c16..f10fb8256442014afbba6b85a23a3544ee0ec9e4 100644 (file)
@@ -162,7 +162,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        };
        struct tcf_t t;
 
-       NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
 #ifdef CONFIG_GACT_PROB
        if (gact->tcfg_ptype) {
                struct tc_gact_p p_opt = {
@@ -171,13 +172,15 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
                        .ptype   = gact->tcfg_ptype,
                };
 
-               NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
+               if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
+                       goto nla_put_failure;
        }
 #endif
        t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
-       NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index 60f8f616e8fa2c7b73df63dff52119622f036168..0beba0e5312e02bbb250657ba32f8b1c7a2570c6 100644 (file)
@@ -267,15 +267,17 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        c.refcnt = ipt->tcf_refcnt - ref;
        strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
 
-       NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
-       NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index);
-       NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook);
-       NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
-       NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname);
+       if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
+           nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
+           nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
+           nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
+           nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
+               goto nla_put_failure;
        tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
        tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
        tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
-       NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);
+       if (nla_put(skb, TCA_IPT_TM, sizeof(tm), &tm))
+               goto nla_put_failure;
        kfree(t);
        return skb->len;
 
index e051398fdf6baf4834bbe21bf13deac081594148..d583aea3b3dfac850b1e953b502e5ef05f13dc80 100644 (file)
@@ -227,11 +227,13 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
        };
        struct tcf_t t;
 
-       NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
-       NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index 001d1b354869a18f06df2b7fd0fee14870bdb343..b5d029eb44f23216cce377506b39e9bbf7997dc3 100644 (file)
@@ -284,11 +284,13 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
        };
        struct tcf_t t;
 
-       NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-       NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
+               goto nla_put_failure;
 
        return skb->len;
 
index 10d3aed86560973f0b91daabcd3ddfb62777c3a9..26aa2f6ce257c5b39541df44db40c950fcd67045 100644 (file)
@@ -215,11 +215,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->refcnt = p->tcf_refcnt - ref;
        opt->bindcnt = p->tcf_bindcnt - bind;
 
-       NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt);
+       if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
+               goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-       NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
+               goto nla_put_failure;
        kfree(opt);
        return skb->len;
 
index 6fb3f5af0f85c69e6be7c90f71242fa073f87691..a9de23297d47759f3fa50113b1bf190f51bf4063 100644 (file)
@@ -356,11 +356,14 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
                opt.rate = police->tcfp_R_tab->rate;
        if (police->tcfp_P_tab)
                opt.peakrate = police->tcfp_P_tab->rate;
-       NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-       if (police->tcfp_result)
-               NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
-       if (police->tcfp_ewma_rate)
-               NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate);
+       if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
+               goto nla_put_failure;
+       if (police->tcfp_result &&
+           nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
+               goto nla_put_failure;
+       if (police->tcfp_ewma_rate &&
+           nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index 73e0a3ab4d55a0150045512771036330e4ca737b..3922f2a2821b83cf3f9db318980a7cad06111e1f 100644 (file)
@@ -172,12 +172,14 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
        };
        struct tcf_t t;
 
-       NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
-       NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
+       if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
+           nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
+               goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-       NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index 35dbbe91027e3734e1b9ccd52b5b9ce2edbcbf5a..476e0fac6712292a133d5645f98ca889af52a8c4 100644 (file)
@@ -166,20 +166,25 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
        };
        struct tcf_t t;
 
-       NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
-       if (d->flags & SKBEDIT_F_PRIORITY)
-               NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
-                       &d->priority);
-       if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
-               NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
-                       sizeof(d->queue_mapping), &d->queue_mapping);
-       if (d->flags & SKBEDIT_F_MARK)
-               NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
-                       &d->mark);
+       if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
+       if ((d->flags & SKBEDIT_F_PRIORITY) &&
+           nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
+                   &d->priority))
+               goto nla_put_failure;
+       if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+           nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
+                   sizeof(d->queue_mapping), &d->queue_mapping))
+               goto nla_put_failure;
+       if ((d->flags & SKBEDIT_F_MARK) &&
+           nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
+                   &d->mark))
+               goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-       NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
+       if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index a69d44f1dac51786bed33220446629761df6de85..f452f696b4b358e1789f60349993616c049c0396 100644 (file)
@@ -357,7 +357,8 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
        tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
        tcm->tcm_parent = tp->classid;
        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
-       NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
+       if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
+               goto nla_put_failure;
        tcm->tcm_handle = fh;
        if (RTM_DELTFILTER != event) {
                tcm->tcm_handle = 0;
index ea1f70b5a5f4747f2988c0b354a0abac7f2443da..590960a22a77fb0747c2fc42c78caff686cd8ac8 100644 (file)
@@ -257,8 +257,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (f->res.classid)
-               NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid);
+       if (f->res.classid &&
+           nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
+               goto nla_put_failure;
 
        if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
            tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
index 1d8bd0dbcd1fff38ed191b89decb3f0afb0456af..ccd08c8dc6a72b18f0c7c45f743fa4912f954413 100644 (file)
@@ -572,25 +572,32 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
        if (nest == NULL)
                goto nla_put_failure;
 
-       NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
-       NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);
+       if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
+           nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
+               goto nla_put_failure;
 
        if (f->mask != ~0 || f->xor != 0) {
-               NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
-               NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
+               if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
+                   nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
+                       goto nla_put_failure;
        }
-       if (f->rshift)
-               NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
-       if (f->addend)
-               NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);
+       if (f->rshift &&
+           nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
+               goto nla_put_failure;
+       if (f->addend &&
+           nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
+               goto nla_put_failure;
 
-       if (f->divisor)
-               NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
-       if (f->baseclass)
-               NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
+       if (f->divisor &&
+           nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
+               goto nla_put_failure;
+       if (f->baseclass &&
+           nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
+               goto nla_put_failure;
 
-       if (f->perturb_period)
-               NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);
+       if (f->perturb_period &&
+           nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
+               goto nla_put_failure;
 
        if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
                goto nla_put_failure;
index 389af152ec45ae2e3a50313dd36a5797ef4a4048..8384a47972403360c22688d8daa1ebdf34338aad 100644 (file)
@@ -346,14 +346,17 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (f->res.classid)
-               NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid);
+       if (f->res.classid &&
+           nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
+               goto nla_put_failure;
 #ifdef CONFIG_NET_CLS_IND
-       if (strlen(f->indev))
-               NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev);
+       if (strlen(f->indev) &&
+           nla_put_string(skb, TCA_FW_INDEV, f->indev))
+               goto nla_put_failure;
 #endif /* CONFIG_NET_CLS_IND */
-       if (head->mask != 0xFFFFFFFF)
-               NLA_PUT_U32(skb, TCA_FW_MASK, head->mask);
+       if (head->mask != 0xFFFFFFFF &&
+           nla_put_u32(skb, TCA_FW_MASK, head->mask))
+               goto nla_put_failure;
 
        if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
                goto nla_put_failure;
index 13ab66e9df585d884d687feaa77baf6bc017c8c5..36fec422740154b4addb97e20410da27f1e98622 100644 (file)
@@ -571,17 +571,21 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
 
        if (!(f->handle & 0x8000)) {
                id = f->id & 0xFF;
-               NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
+               if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
+                       goto nla_put_failure;
        }
        if (f->handle & 0x80000000) {
-               if ((f->handle >> 16) != 0xFFFF)
-                       NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
+               if ((f->handle >> 16) != 0xFFFF &&
+                   nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
+                       goto nla_put_failure;
        } else {
                id = f->id >> 16;
-               NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
+               if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
+                       goto nla_put_failure;
        }
-       if (f->res.classid)
-               NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
+       if (f->res.classid &&
+           nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
+               goto nla_put_failure;
 
        if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
                goto nla_put_failure;
index b01427924f8112dd0b923287976f285fd7674fbf..18ab93ec8d7e3abb9b25514d09485d4a3e3b21c4 100644 (file)
@@ -615,18 +615,22 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
        if (nest == NULL)
                goto nla_put_failure;
 
-       NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst);
+       if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
+               goto nla_put_failure;
        pinfo.dpi = s->dpi;
        pinfo.spi = f->spi;
        pinfo.protocol = s->protocol;
        pinfo.tunnelid = s->tunnelid;
        pinfo.tunnelhdr = f->tunnelhdr;
        pinfo.pad = 0;
-       NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
-       if (f->res.classid)
-               NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-       if (((f->handle >> 8) & 0xFF) != 16)
-               NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
+       if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
+               goto nla_put_failure;
+       if (f->res.classid &&
+           nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
+               goto nla_put_failure;
+       if (((f->handle >> 8) & 0xFF) != 16 &&
+           nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
+               goto nla_put_failure;
 
        if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
                goto nla_put_failure;
index dbe199234c6384bd553c5ac743600ac9886bff3b..fe29420d0b0e5a4241dbe7a5190c2f0686ce1fa0 100644 (file)
@@ -438,10 +438,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
 
        if (!fh) {
                t->tcm_handle = ~0; /* whatever ... */
-               NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash);
-               NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask);
-               NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift);
-               NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through);
+               if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
+                   nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
+                   nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
+                   nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
+                       goto nla_put_failure;
                nla_nest_end(skb, nest);
        } else {
                if (p->perfect) {
@@ -460,8 +461,9 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
                        }
                }
                pr_debug("handle = %d\n", t->tcm_handle);
-               if (r->res.class)
-                       NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid);
+               if (r->res.class &&
+                   nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
+                       goto nla_put_failure;
 
                if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto nla_put_failure;
index 939b627b4795f7852323fbd8673a47b43ac1e61b..591b006a8c5a0a5d37b3222e639849a338b2c722 100644 (file)
@@ -733,36 +733,44 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
                u32 divisor = ht->divisor + 1;
 
-               NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
+               if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
+                       goto nla_put_failure;
        } else {
-               NLA_PUT(skb, TCA_U32_SEL,
-                       sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
-                       &n->sel);
+               if (nla_put(skb, TCA_U32_SEL,
+                           sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
+                           &n->sel))
+                       goto nla_put_failure;
                if (n->ht_up) {
                        u32 htid = n->handle & 0xFFFFF000;
-                       NLA_PUT_U32(skb, TCA_U32_HASH, htid);
+                       if (nla_put_u32(skb, TCA_U32_HASH, htid))
+                               goto nla_put_failure;
                }
-               if (n->res.classid)
-                       NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
-               if (n->ht_down)
-                       NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);
+               if (n->res.classid &&
+                   nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
+                       goto nla_put_failure;
+               if (n->ht_down &&
+                   nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
+                       goto nla_put_failure;
 
 #ifdef CONFIG_CLS_U32_MARK
-               if (n->mark.val || n->mark.mask)
-                       NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
+               if ((n->mark.val || n->mark.mask) &&
+                   nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
+                       goto nla_put_failure;
 #endif
 
                if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
                        goto nla_put_failure;
 
 #ifdef CONFIG_NET_CLS_IND
-               if (strlen(n->indev))
-                       NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
+               if (strlen(n->indev) &&
+                   nla_put_string(skb, TCA_U32_INDEV, n->indev))
+                       goto nla_put_failure;
 #endif
 #ifdef CONFIG_CLS_U32_PERF
-               NLA_PUT(skb, TCA_U32_PCNT,
-               sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
-                       n->pf);
+               if (nla_put(skb, TCA_U32_PCNT,
+                           sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
+                           n->pf))
+                       goto nla_put_failure;
 #endif
        }
 
index 1363bf14e61b676567bd4f6de3166b370534fc32..4790c696cbce8364af6552d2028ed0a1880e3a21 100644 (file)
@@ -585,8 +585,9 @@ static void meta_var_apply_extras(struct meta_value *v,
 
 static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
-       if (v->val && v->len)
-               NLA_PUT(skb, tlv, v->len, (void *) v->val);
+       if (v->val && v->len &&
+           nla_put(skb, tlv, v->len, (void *) v->val))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -636,10 +637,13 @@ static void meta_int_apply_extras(struct meta_value *v,
 
 static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
-       if (v->len == sizeof(unsigned long))
-               NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
-       else if (v->len == sizeof(u32))
-               NLA_PUT_U32(skb, tlv, v->val);
+       if (v->len == sizeof(unsigned long)) {
+               if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
+                       goto nla_put_failure;
+       } else if (v->len == sizeof(u32)) {
+               if (nla_put_u32(skb, tlv, v->val))
+                       goto nla_put_failure;
+       }
 
        return 0;
 
@@ -831,7 +835,8 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
        memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
        memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
 
-       NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr);
+       if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
+               goto nla_put_failure;
 
        ops = meta_type_ops(&meta->lvalue);
        if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
index 88d93eb92507be2f80fd90db12a63b11a7e87aeb..aca233c2b84814fd990e7b702290bf87c0a99ef7 100644 (file)
@@ -441,7 +441,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
        if (top_start == NULL)
                goto nla_put_failure;
 
-       NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr);
+       if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
+               goto nla_put_failure;
 
        list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
        if (list_start == NULL)
@@ -457,7 +458,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
                        .flags = em->flags
                };
 
-               NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
+               if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
+                       goto nla_put_failure;
 
                if (em->ops && em->ops->dump) {
                        if (em->ops->dump(skb, em) < 0)
index 3d8981fde3019ae09130528c3f08e4d632385f2f..d2daefcc205f675bc82500629608dcd0ca90a928 100644 (file)
@@ -426,7 +426,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
        nest = nla_nest_start(skb, TCA_STAB);
        if (nest == NULL)
                goto nla_put_failure;
-       NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+       if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
+               goto nla_put_failure;
        nla_nest_end(skb, nest);
 
        return skb->len;
@@ -1201,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = atomic_read(&q->refcnt);
-       NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+       if (nla_put_string(skb, TCA_KIND, q->ops->id))
+               goto nla_put_failure;
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;
@@ -1505,7 +1507,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
-       NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+       if (nla_put_string(skb, TCA_KIND, q->ops->id))
+               goto nla_put_failure;
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;
 
index e25e49061a0d4370ee02f759ab37dc09f3083d03..a77a4fbc069acc911ab7212d303149189f82a7a3 100644 (file)
@@ -601,7 +601,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
        if (nest == NULL)
                goto nla_put_failure;
 
-       NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
+       if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
+               goto nla_put_failure;
        if (flow->vcc) {
                struct sockaddr_atmpvc pvc;
                int state;
@@ -610,15 +611,19 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
                pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
                pvc.sap_addr.vpi = flow->vcc->vpi;
                pvc.sap_addr.vci = flow->vcc->vci;
-               NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
+               if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
+                       goto nla_put_failure;
                state = ATM_VF2VS(flow->vcc->flags);
-               NLA_PUT_U32(skb, TCA_ATM_STATE, state);
+               if (nla_put_u32(skb, TCA_ATM_STATE, state))
+                       goto nla_put_failure;
+       }
+       if (flow->excess) {
+               if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
+                       goto nla_put_failure;
+       } else {
+               if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
+                       goto nla_put_failure;
        }
-       if (flow->excess)
-               NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
-       else
-               NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-
        nla_nest_end(skb, nest);
        return skb->len;
 
index 24d94c097b35f34bfee4ff5aed2a724e8549176d..6aabd77d1cfdd5cddd34b55dc69699cae9956e57 100644 (file)
@@ -1425,7 +1425,8 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
 
-       NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
+       if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
@@ -1450,7 +1451,8 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
        opt.minidle = (u32)(-cl->minidle);
        opt.offtime = cl->offtime;
        opt.change = ~0;
-       NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
@@ -1468,7 +1470,8 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
        opt.priority = cl->priority + 1;
        opt.cpriority = cl->cpriority + 1;
        opt.weight = cl->weight;
-       NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
@@ -1485,7 +1488,8 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
        opt.priority2 = cl->priority2 + 1;
        opt.pad = 0;
        opt.penalty = cl->penalty;
-       NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
@@ -1502,7 +1506,8 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
                opt.split = cl->split ? cl->split->common.classid : 0;
                opt.defmap = cl->defmap;
                opt.defchange = ~0;
-               NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
+               if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
+                       goto nla_put_failure;
        }
        return skb->len;
 
@@ -1521,7 +1526,8 @@ static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
                opt.police = cl->police;
                opt.__res1 = 0;
                opt.__res2 = 0;
-               NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
+               if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
+                       goto nla_put_failure;
        }
        return skb->len;
 
index 7e267d7b9c75730ef8351798ec1540b92e31f101..81445cc8196f72c3ad7f3d06ef99e4016be1854f 100644 (file)
@@ -515,8 +515,9 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
 
-       NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
-       NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
+       if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
+           nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+               goto nla_put_failure;
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index 6b7fe4a84f138b903928a8c5f5f90f8fe3b855dc..c2189879359bf7530796a0ed2f04ba8d50aa8981 100644 (file)
@@ -260,7 +260,8 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
+       if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
+               goto nla_put_failure;
        return nla_nest_end(skb, nest);
 
 nla_put_failure:
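
Every scheduler hunk above and below applies the same mechanical conversion: the NLA_PUT*() macros, which jumped to the local nla_put_failure label when the attribute did not fit in the skb, are replaced by an explicit check of the corresponding nla_put*() return value. A minimal sketch of the before/after shape of a dump callback (the function name is illustrative, and the macro expansion shown is an assumption based on how the converted call sites behave):

        /*
         * Old form: NLA_PUT() hides the failure path.  Its expansion is
         * assumed to be roughly:
         *
         *      #define NLA_PUT(skb, attrtype, attrlen, data)              \
         *              do {                                                \
         *                      if (nla_put(skb, attrtype, attrlen, data) < 0) \
         *                              goto nla_put_failure;               \
         *              } while (0)
         *
         * so a dump callback reads:
         */
        static int example_dump(struct Qdisc *sch, struct sk_buff *skb)
        {
                struct tc_fifo_qopt opt = { .limit = sch->limit };

                NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
                return skb->len;

        nla_put_failure:
                return -1;
        }

        /* New form: the same callback with the error path made explicit. */
        static int example_dump(struct Qdisc *sch, struct sk_buff *skb)
        {
                struct tc_fifo_qopt opt = { .limit = sch->limit };

                if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                        goto nla_put_failure;
                return skb->len;

        nla_put_failure:
                return -1;
        }

Hunks that emit several attributes in a row chain the calls with ||, as in the choke, dsmark and nl80211 changes, so a single branch covers the whole group.
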
index 2c790204d042e3ac5ec4f2b322b8a0429628dbb5..389b856c66537eb14b2b7fe0aae49883c503d18f 100644 (file)
@@ -429,8 +429,9 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
-       NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
+       if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
+           nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
+               goto nla_put_failure;
 
        return nla_nest_end(skb, opts);
 
@@ -447,13 +448,16 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);
+       if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
+               goto nla_put_failure;
 
-       if (p->default_index != NO_DEFAULT_INDEX)
-               NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);
+       if (p->default_index != NO_DEFAULT_INDEX &&
+           nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
+               goto nla_put_failure;
 
-       if (p->set_tc_index)
-               NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);
+       if (p->set_tc_index &&
+           nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
+               goto nla_put_failure;
 
        return nla_nest_end(skb, opts);
 
index 66effe2da8e0e65775c563a4fc4771429db45411..e15a9eb29087794ccc71408f467e9ae37f9c086b 100644 (file)
@@ -85,7 +85,8 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct tc_fifo_qopt opt = { .limit = sch->limit };
 
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index 67fc573e013a063b524fa8b85512a28c887689f9..0eb1202c22a64c75f36499b6e91d726ed1597e8d 100644 (file)
@@ -512,7 +512,8 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+               goto nla_put_failure;
        return skb->len;
 
 nla_put_failure:
index 0b15236be7b609251199a36b5fdd0f9b6075b18f..55e3310edc94fee8595b7ca53aa2bb60281485bb 100644 (file)
@@ -521,14 +521,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+       if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
+               goto nla_put_failure;
 
        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
 
                max_p[i] = q ? q->parms.max_P : 0;
        }
-       NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+       if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
+               goto nla_put_failure;
 
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
index 9bdca2e011e9122de321353037d4ed2891008601..8db3e2c72827ff28094b20e34f0b26b3815d0437 100644 (file)
@@ -1305,7 +1305,8 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
        tsc.m1 = sm2m(sc->sm1);
        tsc.d  = dx2d(sc->dx);
        tsc.m2 = sm2m(sc->sm2);
-       NLA_PUT(skb, attr, sizeof(tsc), &tsc);
+       if (nla_put(skb, attr, sizeof(tsc), &tsc))
+               goto nla_put_failure;
 
        return skb->len;
 
@@ -1573,7 +1574,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        }
 
        qopt.defcls = q->defcls;
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+               goto nla_put_failure;
        return skb->len;
 
  nla_put_failure:
index 29b942ce9e824866c4d74de75a38c652f1999cb3..2ea6f196e3c823bcf6760af88b554248d93f165a 100644 (file)
@@ -1051,7 +1051,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
+       if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
+               goto nla_put_failure;
        nla_nest_end(skb, nest);
 
        spin_unlock_bh(root_lock);
@@ -1090,7 +1091,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
        opt.level = cl->level;
-       NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest);
        spin_unlock_bh(root_lock);
index 28de43092330abc125423d5328babc709b1f986f..d1831ca966d4540a2ced9eb88539817773e36898 100644 (file)
@@ -247,7 +247,8 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
                opt.offset[i] = dev->tc_to_txq[i].offset;
        }
 
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+               goto nla_put_failure;
 
        return skb->len;
 nla_put_failure:
index 49131d7a7446eafb22c121ce987f0fd7e4e233a5..2a2b096d9a664b12a2fa7b4b9a91e4ff9b233f27 100644 (file)
@@ -284,7 +284,8 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
        opt.bands = q->bands;
        opt.max_bands = q->max_bands;
 
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+               goto nla_put_failure;
 
        return skb->len;
 
index 5da548fa7ae9d46de78f5b75152e1a84b99ca9a7..110973145a4bcd4a25d0521df8afa667790ed79f 100644 (file)
@@ -834,7 +834,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
                        .p23 = q->clg.a5,
                };
 
-               NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+               if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
+                       goto nla_put_failure;
                break;
        }
        case CLG_GILB_ELL: {
@@ -845,7 +846,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
                        .k1 = q->clg.a4,
                };
 
-               NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+               if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
+                       goto nla_put_failure;
                break;
        }
        }
@@ -874,26 +876,31 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        qopt.loss = q->loss;
        qopt.gap = q->gap;
        qopt.duplicate = q->duplicate;
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+               goto nla_put_failure;
 
        cor.delay_corr = q->delay_cor.rho;
        cor.loss_corr = q->loss_cor.rho;
        cor.dup_corr = q->dup_cor.rho;
-       NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+       if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
+               goto nla_put_failure;
 
        reorder.probability = q->reorder;
        reorder.correlation = q->reorder_cor.rho;
-       NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+       if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
+               goto nla_put_failure;
 
        corrupt.probability = q->corrupt;
        corrupt.correlation = q->corrupt_cor.rho;
-       NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+       if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
+               goto nla_put_failure;
 
        rate.rate = q->rate;
        rate.packet_overhead = q->packet_overhead;
        rate.cell_size = q->cell_size;
        rate.cell_overhead = q->cell_overhead;
-       NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+       if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
+               goto nla_put_failure;
 
        if (dump_loss_model(q, skb) != 0)
                goto nla_put_failure;
index b5d56a22b1d20f2605771ed8360a75f5d2f8ce75..79359b69ad8d6fcf35b6937673d4ae332bbf5158 100644 (file)
@@ -247,7 +247,8 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
        opt.bands = q->bands;
        memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+               goto nla_put_failure;
 
        return skb->len;
 
index e68cb440756a4c33ce1108d464ea51b1c0b95631..9af01f3df18c66b6325c8afe628758b56e306952 100644 (file)
@@ -429,8 +429,9 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
-       NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
+       if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
+           nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
+               goto nla_put_failure;
        return nla_nest_end(skb, nest);
 
 nla_put_failure:
index a5cc3012cf42902a496276d8c4e0932b6ae4b53f..633e32defdcc61291554e9457d5e13c2f6b87a1e 100644 (file)
@@ -272,8 +272,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
-       NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
+       if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
+           nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
+               goto nla_put_failure;
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index d7eea99333e96a33a953106a3a0f2fdb32a12385..74305c883bd3ee842c535bbc27f003d389207346 100644 (file)
@@ -570,7 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start(skb, TCA_OPTIONS);
-       NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index 02a21abea65e20c207fe77d81f0240957c259970..d3a1bc26dbfc536350ebaaecfdcb5426b7eacec5 100644 (file)
@@ -812,7 +812,8 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
        memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
        opt.flags       = q->flags;
 
-       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+               goto nla_put_failure;
 
        return skb->len;
 
index b8e156319d7bc48c261ccad7110bbeb84ba44856..4b056c15e90c7f79ed7f37236fd5ace197092114 100644 (file)
@@ -359,7 +359,8 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        opt.mtu = q->mtu;
        opt.buffer = q->buffer;
-       NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
+       if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest);
        return skb->len;
index d510353ef4310598b031b5122ecb99b2b719ad53..eadb9020cd6450575862c066010b6ca9ccb66010 100644 (file)
@@ -1442,6 +1442,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        long timeo;
        struct scm_cookie tmp_scm;
        int max_level;
+       int data_len = 0;
 
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
@@ -1475,7 +1476,13 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (len > sk->sk_sndbuf - 32)
                goto out;
 
-       skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
+       if (len > SKB_MAX_ALLOC)
+               data_len = min_t(size_t,
+                                len - SKB_MAX_ALLOC,
+                                MAX_SKB_FRAGS * PAGE_SIZE);
+
+       skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
+                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out;
 
@@ -1485,8 +1492,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        max_level = err + 1;
        unix_get_secdata(siocb->scm, skb);
 
-       skb_reset_transport_header(skb);
-       err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+       skb_put(skb, len - data_len);
+       skb->data_len = data_len;
+       skb->len = len;
+       err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
        if (err)
                goto out_free;
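
Unlike the netlink conversions, the hunks above change how unix_dgram_sendmsg() allocates the outgoing skb: instead of a single linear buffer of len bytes from sock_alloc_send_skb(), the datagram is split into a linear head of at most SKB_MAX_ALLOC bytes plus up to MAX_SKB_FRAGS * PAGE_SIZE bytes of page fragments, allocated with sock_alloc_send_pskb() and filled with skb_copy_datagram_from_iovec(). A minimal sketch of the size split, written as a hypothetical standalone helper (the helper name is invented for illustration; the bounds are the ones used in the patch):

        /* Bytes that go into page fragments rather than the linear area. */
        static size_t unix_dgram_paged_len(size_t len)
        {
                size_t data_len = 0;

                if (len > SKB_MAX_ALLOC)
                        data_len = min_t(size_t, len - SKB_MAX_ALLOC,
                                         MAX_SKB_FRAGS * PAGE_SIZE);
                return data_len;        /* linear part is len - data_len */
        }

After the allocation, skb_put() reserves only the linear part, and skb->data_len / skb->len are set so that the copy routine spreads the remaining bytes across the fragments.
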
 
index f432c57af05d03addc5f856bfff8a784c19f19a1..65622e9c7c5460f2636ee197f86e58d138d94bc9 100644 (file)
@@ -356,20 +356,26 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
 static int nl80211_msg_put_channel(struct sk_buff *msg,
                                   struct ieee80211_channel *chan)
 {
-       NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
-                   chan->center_freq);
+       if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
+                       chan->center_freq))
+               goto nla_put_failure;
 
-       if (chan->flags & IEEE80211_CHAN_DISABLED)
-               NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
-       if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-               NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
-       if (chan->flags & IEEE80211_CHAN_NO_IBSS)
-               NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
-       if (chan->flags & IEEE80211_CHAN_RADAR)
-               NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
+       if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
+               goto nla_put_failure;
+       if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN))
+               goto nla_put_failure;
+       if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
+               goto nla_put_failure;
+       if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+               goto nla_put_failure;
 
-       NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
-                   DBM_TO_MBM(chan->max_power));
+       if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+                       DBM_TO_MBM(chan->max_power)))
+               goto nla_put_failure;
 
        return 0;
 
@@ -621,8 +627,8 @@ static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
 
        i = 0;
        while (ifmodes) {
-               if (ifmodes & 1)
-                       NLA_PUT_FLAG(msg, i);
+               if ((ifmodes & 1) && nla_put_flag(msg, i))
+                       goto nla_put_failure;
                ifmodes >>= 1;
                i++;
        }
@@ -665,8 +671,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
                        nl_limit = nla_nest_start(msg, j + 1);
                        if (!nl_limit)
                                goto nla_put_failure;
-                       NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX,
-                                   c->limits[j].max);
+                       if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
+                                       c->limits[j].max))
+                               goto nla_put_failure;
                        if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
                                                c->limits[j].types))
                                goto nla_put_failure;
@@ -675,13 +682,14 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
 
                nla_nest_end(msg, nl_limits);
 
-               if (c->beacon_int_infra_match)
-                       NLA_PUT_FLAG(msg,
-                               NL80211_IFACE_COMB_STA_AP_BI_MATCH);
-               NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
-                           c->num_different_channels);
-               NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM,
-                           c->max_interfaces);
+               if (c->beacon_int_infra_match &&
+                   nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
+                       goto nla_put_failure;
+               if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+                               c->num_different_channels) ||
+                   nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
+                               c->max_interfaces))
+                       goto nla_put_failure;
 
                nla_nest_end(msg, nl_combi);
        }
@@ -712,64 +720,74 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        if (!hdr)
                return -1;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
-       NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
-
-       NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
-                   cfg80211_rdev_list_generation);
-
-       NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
-                  dev->wiphy.retry_short);
-       NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
-                  dev->wiphy.retry_long);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
-                   dev->wiphy.frag_threshold);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
-                   dev->wiphy.rts_threshold);
-       NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
-                   dev->wiphy.coverage_class);
-       NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
-                  dev->wiphy.max_scan_ssids);
-       NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
-                  dev->wiphy.max_sched_scan_ssids);
-       NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
-                   dev->wiphy.max_scan_ie_len);
-       NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
-                   dev->wiphy.max_sched_scan_ie_len);
-       NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
-                  dev->wiphy.max_match_sets);
-
-       if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
-       if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
-       if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
-       if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
-       if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
-       if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
-
-       NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
-               sizeof(u32) * dev->wiphy.n_cipher_suites,
-               dev->wiphy.cipher_suites);
-
-       NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
-                  dev->wiphy.max_num_pmkids);
-
-       if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
-
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
-                   dev->wiphy.available_antennas_tx);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
-                   dev->wiphy.available_antennas_rx);
-
-       if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
-               NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
-                           dev->wiphy.probe_resp_offload);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
+           nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) ||
+           nla_put_u32(msg, NL80211_ATTR_GENERATION,
+                       cfg80211_rdev_list_generation) ||
+           nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
+                      dev->wiphy.retry_short) ||
+           nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
+                      dev->wiphy.retry_long) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
+                       dev->wiphy.frag_threshold) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
+                       dev->wiphy.rts_threshold) ||
+           nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+                      dev->wiphy.coverage_class) ||
+           nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
+                      dev->wiphy.max_scan_ssids) ||
+           nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
+                      dev->wiphy.max_sched_scan_ssids) ||
+           nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
+                       dev->wiphy.max_scan_ie_len) ||
+           nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
+                       dev->wiphy.max_sched_scan_ie_len) ||
+           nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+                      dev->wiphy.max_match_sets))
+               goto nla_put_failure;
+
+       if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
+           nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
+               goto nla_put_failure;
+       if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+           nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
+               goto nla_put_failure;
+       if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+           nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
+               goto nla_put_failure;
+       if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
+           nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
+               goto nla_put_failure;
+       if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+           nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
+               goto nla_put_failure;
+       if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
+           nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
+               goto nla_put_failure;
+
+       if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
+                   sizeof(u32) * dev->wiphy.n_cipher_suites,
+                   dev->wiphy.cipher_suites))
+               goto nla_put_failure;
+
+       if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
+                      dev->wiphy.max_num_pmkids))
+               goto nla_put_failure;
+
+       if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+           nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
+               goto nla_put_failure;
+
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
+                       dev->wiphy.available_antennas_tx) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
+                       dev->wiphy.available_antennas_rx))
+               goto nla_put_failure;
+
+       if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
+           nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+                       dev->wiphy.probe_resp_offload))
+               goto nla_put_failure;
 
        if ((dev->wiphy.available_antennas_tx ||
             dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
@@ -777,8 +795,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                int res;
                res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
                if (!res) {
-                       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant);
-                       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant);
+                       if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
+                                       tx_ant) ||
+                           nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX,
+                                       rx_ant))
+                               goto nla_put_failure;
                }
        }
 
@@ -799,17 +820,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                        goto nla_put_failure;
 
                /* add HT info */
-               if (dev->wiphy.bands[band]->ht_cap.ht_supported) {
-                       NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET,
-                               sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
-                               &dev->wiphy.bands[band]->ht_cap.mcs);
-                       NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA,
-                               dev->wiphy.bands[band]->ht_cap.cap);
-                       NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
-                               dev->wiphy.bands[band]->ht_cap.ampdu_factor);
-                       NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
-                               dev->wiphy.bands[band]->ht_cap.ampdu_density);
-               }
+               if (dev->wiphy.bands[band]->ht_cap.ht_supported &&
+                   (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
+                            sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
+                            &dev->wiphy.bands[band]->ht_cap.mcs) ||
+                    nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
+                                dev->wiphy.bands[band]->ht_cap.cap) ||
+                    nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
+                               dev->wiphy.bands[band]->ht_cap.ampdu_factor) ||
+                    nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+                               dev->wiphy.bands[band]->ht_cap.ampdu_density)))
+                       goto nla_put_failure;
 
                /* add frequencies */
                nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
@@ -842,11 +863,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                                goto nla_put_failure;
 
                        rate = &dev->wiphy.bands[band]->bitrates[i];
-                       NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE,
-                                   rate->bitrate);
-                       if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
-                               NLA_PUT_FLAG(msg,
-                                       NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE);
+                       if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
+                                       rate->bitrate))
+                               goto nla_put_failure;
+                       if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
+                           nla_put_flag(msg,
+                                        NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
+                               goto nla_put_failure;
 
                        nla_nest_end(msg, nl_rate);
                }
@@ -866,7 +889,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
         do {                                                   \
                if (dev->ops->op) {                             \
                        i++;                                    \
-                       NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \
+                       if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
+                               goto nla_put_failure;           \
                }                                               \
        } while (0)
 
@@ -894,7 +918,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
        if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
                i++;
-               NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
+               if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
+                       goto nla_put_failure;
        }
        CMD(set_channel, SET_CHANNEL);
        CMD(set_wds_peer, SET_WDS_PEER);
@@ -908,7 +933,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        CMD(set_noack_map, SET_NOACK_MAP);
        if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
                i++;
-               NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+               if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
+                       goto nla_put_failure;
        }
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -919,23 +945,27 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
        if (dev->ops->connect || dev->ops->auth) {
                i++;
-               NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT);
+               if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
+                       goto nla_put_failure;
        }
 
        if (dev->ops->disconnect || dev->ops->deauth) {
                i++;
-               NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT);
+               if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
+                       goto nla_put_failure;
        }
 
        nla_nest_end(msg, nl_cmds);
 
        if (dev->ops->remain_on_channel &&
-           dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
-               NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
-                           dev->wiphy.max_remain_on_channel_duration);
+           (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+           nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
+                       dev->wiphy.max_remain_on_channel_duration))
+               goto nla_put_failure;
 
-       if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
+       if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+           nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
+               goto nla_put_failure;
 
        if (mgmt_stypes) {
                u16 stypes;
@@ -953,9 +983,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                        i = 0;
                        stypes = mgmt_stypes[ift].tx;
                        while (stypes) {
-                               if (stypes & 1)
-                                       NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
-                                                   (i << 4) | IEEE80211_FTYPE_MGMT);
+                               if ((stypes & 1) &&
+                                   nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+                                               (i << 4) | IEEE80211_FTYPE_MGMT))
+                                       goto nla_put_failure;
                                stypes >>= 1;
                                i++;
                        }
@@ -975,9 +1006,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                        i = 0;
                        stypes = mgmt_stypes[ift].rx;
                        while (stypes) {
-                               if (stypes & 1)
-                                       NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
-                                                   (i << 4) | IEEE80211_FTYPE_MGMT);
+                               if ((stypes & 1) &&
+                                   nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+                                               (i << 4) | IEEE80211_FTYPE_MGMT))
+                                       goto nla_put_failure;
                                stypes >>= 1;
                                i++;
                        }
@@ -994,22 +1026,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                if (!nl_wowlan)
                        goto nla_put_failure;
 
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
-               if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+               if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+                   ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+                   goto nla_put_failure;
                if (dev->wiphy.wowlan.n_patterns) {
                        struct nl80211_wowlan_pattern_support pat = {
                                .max_patterns = dev->wiphy.wowlan.n_patterns,
@@ -1018,8 +1051,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                                .max_pattern_len =
                                        dev->wiphy.wowlan.pattern_max_len,
                        };
-                       NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
-                               sizeof(pat), &pat);
+                       if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+                                   sizeof(pat), &pat))
+                               goto nla_put_failure;
                }
 
                nla_nest_end(msg, nl_wowlan);
@@ -1032,16 +1066,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        if (nl80211_put_iface_combinations(&dev->wiphy, msg))
                goto nla_put_failure;
 
-       if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
-               NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
-                           dev->wiphy.ap_sme_capa);
+       if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+           nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
+                       dev->wiphy.ap_sme_capa))
+               goto nla_put_failure;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+       if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS,
+                       dev->wiphy.features))
+               goto nla_put_failure;
 
-       if (dev->wiphy.ht_capa_mod_mask)
-               NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
-                       sizeof(*dev->wiphy.ht_capa_mod_mask),
-                       dev->wiphy.ht_capa_mod_mask);
+       if (dev->wiphy.ht_capa_mod_mask &&
+           nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+                   sizeof(*dev->wiphy.ht_capa_mod_mask),
+                   dev->wiphy.ht_capa_mod_mask))
+               goto nla_put_failure;
 
        return genlmsg_end(msg, hdr);
 
@@ -1489,14 +1527,15 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        if (!hdr)
                return -1;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
-
-       NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
-                   rdev->devlist_generation ^
-                       (cfg80211_rdev_list_generation << 2));
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+           nla_put_u32(msg, NL80211_ATTR_IFTYPE,
+                       dev->ieee80211_ptr->iftype) ||
+           nla_put_u32(msg, NL80211_ATTR_GENERATION,
+                       rdev->devlist_generation ^
+                       (cfg80211_rdev_list_generation << 2)))
+               goto nla_put_failure;
 
        return genlmsg_end(msg, hdr);
 
@@ -1794,35 +1833,34 @@ static void get_key_callback(void *c, struct key_params *params)
        struct nlattr *key;
        struct get_key_cookie *cookie = c;
 
-       if (params->key)
-               NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA,
-                       params->key_len, params->key);
-
-       if (params->seq)
-               NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ,
-                       params->seq_len, params->seq);
-
-       if (params->cipher)
-               NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
-                           params->cipher);
+       if ((params->key &&
+            nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
+                    params->key_len, params->key)) ||
+           (params->seq &&
+            nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
+                    params->seq_len, params->seq)) ||
+           (params->cipher &&
+            nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
+                        params->cipher)))
+               goto nla_put_failure;
 
        key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
        if (!key)
                goto nla_put_failure;
 
-       if (params->key)
-               NLA_PUT(cookie->msg, NL80211_KEY_DATA,
-                       params->key_len, params->key);
-
-       if (params->seq)
-               NLA_PUT(cookie->msg, NL80211_KEY_SEQ,
-                       params->seq_len, params->seq);
-
-       if (params->cipher)
-               NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER,
-                           params->cipher);
+       if ((params->key &&
+            nla_put(cookie->msg, NL80211_KEY_DATA,
+                    params->key_len, params->key)) ||
+           (params->seq &&
+            nla_put(cookie->msg, NL80211_KEY_SEQ,
+                    params->seq_len, params->seq)) ||
+           (params->cipher &&
+            nla_put_u32(cookie->msg, NL80211_KEY_CIPHER,
+                        params->cipher)))
+               goto nla_put_failure;
 
-       NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx);
+       if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+               goto nla_put_failure;
 
        nla_nest_end(cookie->msg, key);
 
@@ -1880,10 +1918,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        cookie.msg = msg;
        cookie.idx = key_idx;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx);
-       if (mac_addr)
-               NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx))
+               goto nla_put_failure;
+       if (mac_addr &&
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+               goto nla_put_failure;
 
        if (pairwise && mac_addr &&
            !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -2373,15 +2413,15 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
 
        /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
        bitrate = cfg80211_calculate_bitrate(info);
-       if (bitrate > 0)
-               NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
-       if (info->flags & RATE_INFO_FLAGS_MCS)
-               NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
-       if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
-               NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
-       if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
-               NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+       if ((bitrate > 0 &&
+            nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+           ((info->flags & RATE_INFO_FLAGS_MCS) &&
+            nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
+           ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
+            nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
+           ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
+            nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
+               goto nla_put_failure;
 
        nla_nest_end(msg, rate);
        return true;
@@ -2403,43 +2443,50 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
        if (!hdr)
                return -1;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
-
-       NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
+           nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation))
+               goto nla_put_failure;
 
        sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
        if (!sinfoattr)
                goto nla_put_failure;
-       if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
-                           sinfo->connected_time);
-       if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
-                           sinfo->inactive_time);
-       if (sinfo->filled & STATION_INFO_RX_BYTES)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
-                           sinfo->rx_bytes);
-       if (sinfo->filled & STATION_INFO_TX_BYTES)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
-                           sinfo->tx_bytes);
-       if (sinfo->filled & STATION_INFO_LLID)
-               NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
-                           sinfo->llid);
-       if (sinfo->filled & STATION_INFO_PLID)
-               NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
-                           sinfo->plid);
-       if (sinfo->filled & STATION_INFO_PLINK_STATE)
-               NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
-                           sinfo->plink_state);
+       if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
+           nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
+                       sinfo->connected_time))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
+           nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
+                       sinfo->inactive_time))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
+           nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
+                       sinfo->rx_bytes))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
+           nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+                       sinfo->tx_bytes))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_LLID) &&
+           nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_PLID) &&
+           nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
+           nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
+                      sinfo->plink_state))
+               goto nla_put_failure;
        switch (rdev->wiphy.signal_type) {
        case CFG80211_SIGNAL_TYPE_MBM:
-               if (sinfo->filled & STATION_INFO_SIGNAL)
-                       NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
-                                  sinfo->signal);
-               if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
-                       NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
-                                  sinfo->signal_avg);
+               if ((sinfo->filled & STATION_INFO_SIGNAL) &&
+                   nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
+                              sinfo->signal))
+                       goto nla_put_failure;
+               if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
+                   nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+                              sinfo->signal_avg))
+                       goto nla_put_failure;
                break;
        default:
                break;
@@ -2454,49 +2501,56 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                                          NL80211_STA_INFO_RX_BITRATE))
                        goto nla_put_failure;
        }
-       if (sinfo->filled & STATION_INFO_RX_PACKETS)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
-                           sinfo->rx_packets);
-       if (sinfo->filled & STATION_INFO_TX_PACKETS)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS,
-                           sinfo->tx_packets);
-       if (sinfo->filled & STATION_INFO_TX_RETRIES)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES,
-                           sinfo->tx_retries);
-       if (sinfo->filled & STATION_INFO_TX_FAILED)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
-                           sinfo->tx_failed);
-       if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
-               NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS,
-                           sinfo->beacon_loss_count);
+       if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
+           nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
+                       sinfo->rx_packets))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
+           nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
+                       sinfo->tx_packets))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
+           nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
+                       sinfo->tx_retries))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
+           nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
+                       sinfo->tx_failed))
+               goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
+           nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
+                       sinfo->beacon_loss_count))
+               goto nla_put_failure;
        if (sinfo->filled & STATION_INFO_BSS_PARAM) {
                bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
                if (!bss_param)
                        goto nla_put_failure;
 
-               if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
-                       NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
-               if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
-                       NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
-               if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
-                       NLA_PUT_FLAG(msg,
-                                    NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
-               NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
-                          sinfo->bss_param.dtim_period);
-               NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
-                           sinfo->bss_param.beacon_interval);
+               if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) &&
+                    nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
+                   ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
+                    nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
+                   ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
+                    nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
+                   nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+                              sinfo->bss_param.dtim_period) ||
+                   nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+                               sinfo->bss_param.beacon_interval))
+                       goto nla_put_failure;
 
                nla_nest_end(msg, bss_param);
        }
-       if (sinfo->filled & STATION_INFO_STA_FLAGS)
-               NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
-                       sizeof(struct nl80211_sta_flag_update),
-                       &sinfo->sta_flags);
+       if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
+           nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
+                   sizeof(struct nl80211_sta_flag_update),
+                   &sinfo->sta_flags))
+               goto nla_put_failure;
        nla_nest_end(msg, sinfoattr);
 
-       if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
-               NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
-                       sinfo->assoc_req_ies);
+       if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
+           nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+                   sinfo->assoc_req_ies))
+               goto nla_put_failure;
 
        return genlmsg_end(msg, hdr);
 
@@ -2918,36 +2972,37 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
        if (!hdr)
                return -1;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
-       NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
-
-       NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) ||
+           nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) ||
+           nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation))
+               goto nla_put_failure;
 
        pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
        if (!pinfoattr)
                goto nla_put_failure;
-       if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
-               NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
-                           pinfo->frame_qlen);
-       if (pinfo->filled & MPATH_INFO_SN)
-               NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
-                           pinfo->sn);
-       if (pinfo->filled & MPATH_INFO_METRIC)
-               NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
-                           pinfo->metric);
-       if (pinfo->filled & MPATH_INFO_EXPTIME)
-               NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME,
-                           pinfo->exptime);
-       if (pinfo->filled & MPATH_INFO_FLAGS)
-               NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS,
-                           pinfo->flags);
-       if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT)
-               NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
-                           pinfo->discovery_timeout);
-       if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES)
-               NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
-                           pinfo->discovery_retries);
+       if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) &&
+           nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
+                       pinfo->frame_qlen))
+               goto nla_put_failure;
+       if (((pinfo->filled & MPATH_INFO_SN) &&
+            nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) ||
+           ((pinfo->filled & MPATH_INFO_METRIC) &&
+            nla_put_u32(msg, NL80211_MPATH_INFO_METRIC,
+                        pinfo->metric)) ||
+           ((pinfo->filled & MPATH_INFO_EXPTIME) &&
+            nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME,
+                        pinfo->exptime)) ||
+           ((pinfo->filled & MPATH_INFO_FLAGS) &&
+            nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS,
+                       pinfo->flags)) ||
+           ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) &&
+            nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
+                        pinfo->discovery_timeout)) ||
+           ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
+            nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
+                       pinfo->discovery_retries)))
+               goto nla_put_failure;
 
        nla_nest_end(msg, pinfoattr);
 
@@ -3273,47 +3328,48 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
        pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
        if (!pinfoattr)
                goto nla_put_failure;
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
-                       cur_params.dot11MeshRetryTimeout);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
-                       cur_params.dot11MeshConfirmTimeout);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
-                       cur_params.dot11MeshHoldingTimeout);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
-                       cur_params.dot11MeshMaxPeerLinks);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES,
-                       cur_params.dot11MeshMaxRetries);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
-                       cur_params.dot11MeshTTL);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL,
-                       cur_params.element_ttl);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
-                       cur_params.auto_open_plinks);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
-                       cur_params.dot11MeshHWMPmaxPREQretries);
-       NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
-                       cur_params.path_refresh_time);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
-                       cur_params.min_discovery_timeout);
-       NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
-                       cur_params.dot11MeshHWMPactivePathTimeout);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
-                       cur_params.dot11MeshHWMPpreqMinInterval);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
-                       cur_params.dot11MeshHWMPperrMinInterval);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
-                       cur_params.dot11MeshHWMPnetDiameterTraversalTime);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
-                       cur_params.dot11MeshHWMPRootMode);
-       NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
-                       cur_params.dot11MeshHWMPRannInterval);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
-                       cur_params.dot11MeshGateAnnouncementProtocol);
-       NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
-                       cur_params.dot11MeshForwarding);
-       NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
-                       cur_params.rssi_threshold);
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
+                       cur_params.dot11MeshRetryTimeout) ||
+           nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+                       cur_params.dot11MeshConfirmTimeout) ||
+           nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
+                       cur_params.dot11MeshHoldingTimeout) ||
+           nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
+                       cur_params.dot11MeshMaxPeerLinks) ||
+           nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES,
+                      cur_params.dot11MeshMaxRetries) ||
+           nla_put_u8(msg, NL80211_MESHCONF_TTL,
+                      cur_params.dot11MeshTTL) ||
+           nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL,
+                      cur_params.element_ttl) ||
+           nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+                      cur_params.auto_open_plinks) ||
+           nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+                      cur_params.dot11MeshHWMPmaxPREQretries) ||
+           nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
+                       cur_params.path_refresh_time) ||
+           nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+                       cur_params.min_discovery_timeout) ||
+           nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+                       cur_params.dot11MeshHWMPactivePathTimeout) ||
+           nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+                       cur_params.dot11MeshHWMPpreqMinInterval) ||
+           nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+                       cur_params.dot11MeshHWMPperrMinInterval) ||
+           nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+                       cur_params.dot11MeshHWMPnetDiameterTraversalTime) ||
+           nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
+                      cur_params.dot11MeshHWMPRootMode) ||
+           nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+                       cur_params.dot11MeshHWMPRannInterval) ||
+           nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+                      cur_params.dot11MeshGateAnnouncementProtocol) ||
+           nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
+                      cur_params.dot11MeshForwarding) ||
+           nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
+                       cur_params.rssi_threshold))
+               goto nla_put_failure;
        nla_nest_end(msg, pinfoattr);
        genlmsg_end(msg, hdr);
        return genlmsg_reply(msg, info);
@@ -3544,11 +3600,12 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
        if (!hdr)
                goto put_failure;
 
-       NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
-               cfg80211_regdomain->alpha2);
-       if (cfg80211_regdomain->dfs_region)
-               NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
-                          cfg80211_regdomain->dfs_region);
+       if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+                          cfg80211_regdomain->alpha2) ||
+           (cfg80211_regdomain->dfs_region &&
+            nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
+                       cfg80211_regdomain->dfs_region)))
+               goto nla_put_failure;
 
        nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
        if (!nl_reg_rules)
@@ -3568,18 +3625,19 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
                if (!nl_reg_rule)
                        goto nla_put_failure;
 
-               NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
-                       reg_rule->flags);
-               NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
-                       freq_range->start_freq_khz);
-               NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
-                       freq_range->end_freq_khz);
-               NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
-                       freq_range->max_bandwidth_khz);
-               NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
-                       power_rule->max_antenna_gain);
-               NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
-                       power_rule->max_eirp);
+               if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
+                               reg_rule->flags) ||
+                   nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
+                               freq_range->start_freq_khz) ||
+                   nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
+                               freq_range->end_freq_khz) ||
+                   nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
+                               freq_range->max_bandwidth_khz) ||
+                   nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+                               power_rule->max_antenna_gain) ||
+                   nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
+                               power_rule->max_eirp))
+                       goto nla_put_failure;
 
                nla_nest_end(msg, nl_reg_rule);
        }
@@ -4150,37 +4208,44 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
        genl_dump_check_consistent(cb, hdr, &nl80211_fam);
 
-       NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
+       if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
+               goto nla_put_failure;
 
        bss = nla_nest_start(msg, NL80211_ATTR_BSS);
        if (!bss)
                goto nla_put_failure;
-       if (!is_zero_ether_addr(res->bssid))
-               NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
-       if (res->information_elements && res->len_information_elements)
-               NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
-                       res->len_information_elements,
-                       res->information_elements);
-       if (res->beacon_ies && res->len_beacon_ies &&
-           res->beacon_ies != res->information_elements)
-               NLA_PUT(msg, NL80211_BSS_BEACON_IES,
-                       res->len_beacon_ies, res->beacon_ies);
-       if (res->tsf)
-               NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
-       if (res->beacon_interval)
-               NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval);
-       NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
-       NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);
-       NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO,
-               jiffies_to_msecs(jiffies - intbss->ts));
+       if ((!is_zero_ether_addr(res->bssid) &&
+            nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) ||
+           (res->information_elements && res->len_information_elements &&
+            nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
+                    res->len_information_elements,
+                    res->information_elements)) ||
+           (res->beacon_ies && res->len_beacon_ies &&
+            res->beacon_ies != res->information_elements &&
+            nla_put(msg, NL80211_BSS_BEACON_IES,
+                    res->len_beacon_ies, res->beacon_ies)))
+               goto nla_put_failure;
+       if (res->tsf &&
+           nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
+               goto nla_put_failure;
+       if (res->beacon_interval &&
+           nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
+               goto nla_put_failure;
+       if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
+           nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+           nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
+                       jiffies_to_msecs(jiffies - intbss->ts)))
+               goto nla_put_failure;
 
        switch (rdev->wiphy.signal_type) {
        case CFG80211_SIGNAL_TYPE_MBM:
-               NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
+               if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
+                       goto nla_put_failure;
                break;
        case CFG80211_SIGNAL_TYPE_UNSPEC:
-               NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
+               if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
+                       goto nla_put_failure;
                break;
        default:
                break;
@@ -4189,14 +4254,16 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
        switch (wdev->iftype) {
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_STATION:
-               if (intbss == wdev->current_bss)
-                       NLA_PUT_U32(msg, NL80211_BSS_STATUS,
-                                   NL80211_BSS_STATUS_ASSOCIATED);
+               if (intbss == wdev->current_bss &&
+                   nla_put_u32(msg, NL80211_BSS_STATUS,
+                               NL80211_BSS_STATUS_ASSOCIATED))
+                       goto nla_put_failure;
                break;
        case NL80211_IFTYPE_ADHOC:
-               if (intbss == wdev->current_bss)
-                       NLA_PUT_U32(msg, NL80211_BSS_STATUS,
-                                   NL80211_BSS_STATUS_IBSS_JOINED);
+               if (intbss == wdev->current_bss &&
+                   nla_put_u32(msg, NL80211_BSS_STATUS,
+                               NL80211_BSS_STATUS_IBSS_JOINED))
+                       goto nla_put_failure;
                break;
        default:
                break;
@@ -4265,34 +4332,43 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
        if (!hdr)
                return -ENOMEM;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+               goto nla_put_failure;
 
        infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
        if (!infoattr)
                goto nla_put_failure;
 
-       NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
-                   survey->channel->center_freq);
-       if (survey->filled & SURVEY_INFO_NOISE_DBM)
-               NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
-                           survey->noise);
-       if (survey->filled & SURVEY_INFO_IN_USE)
-               NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE);
-       if (survey->filled & SURVEY_INFO_CHANNEL_TIME)
-               NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
-                           survey->channel_time);
-       if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
-               NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
-                           survey->channel_time_busy);
-       if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
-               NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
-                           survey->channel_time_ext_busy);
-       if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX)
-               NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
-                           survey->channel_time_rx);
-       if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX)
-               NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
-                           survey->channel_time_tx);
+       if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
+                       survey->channel->center_freq))
+               goto nla_put_failure;
+
+       if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
+           nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
+               goto nla_put_failure;
+       if ((survey->filled & SURVEY_INFO_IN_USE) &&
+           nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
+               goto nla_put_failure;
+       if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
+           nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
+                       survey->channel_time))
+               goto nla_put_failure;
+       if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
+           nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
+                       survey->channel_time_busy))
+               goto nla_put_failure;
+       if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
+           nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
+                       survey->channel_time_ext_busy))
+               goto nla_put_failure;
+       if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
+           nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
+                       survey->channel_time_rx))
+               goto nla_put_failure;
+       if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
+           nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+                       survey->channel_time_tx))
+               goto nla_put_failure;
 
        nla_nest_end(msg, infoattr);
 
@@ -4973,7 +5049,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
 
-               if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
+               if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
                        genlmsg_cancel(skb, hdr);
                        break;
                }
@@ -5024,7 +5100,8 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
                return NULL;
        }
 
-       NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+       if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+               goto nla_put_failure;
        data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
 
        ((void **)skb->cb)[0] = rdev;
@@ -5403,7 +5480,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
        if (err)
                goto free_msg;
 
-       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+       if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -5690,7 +5768,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                goto free_msg;
 
        if (msg) {
-               NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+               if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+                       goto nla_put_failure;
 
                genlmsg_end(msg, hdr);
                return genlmsg_reply(msg, info);
@@ -5795,7 +5874,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
        else
                ps_state = NL80211_PS_DISABLED;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);
+       if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
        return genlmsg_reply(msg, info);
@@ -5942,20 +6022,21 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
                if (!nl_wowlan)
                        goto nla_put_failure;
 
-               if (rdev->wowlan->any)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
-               if (rdev->wowlan->disconnect)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
-               if (rdev->wowlan->magic_pkt)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
-               if (rdev->wowlan->gtk_rekey_failure)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
-               if (rdev->wowlan->eap_identity_req)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
-               if (rdev->wowlan->four_way_handshake)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
-               if (rdev->wowlan->rfkill_release)
-                       NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+               if ((rdev->wowlan->any &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+                   (rdev->wowlan->disconnect &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+                   (rdev->wowlan->magic_pkt &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+                   (rdev->wowlan->gtk_rekey_failure &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+                   (rdev->wowlan->eap_identity_req &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+                   (rdev->wowlan->four_way_handshake &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+                   (rdev->wowlan->rfkill_release &&
+                    nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+                       goto nla_put_failure;
                if (rdev->wowlan->n_patterns) {
                        struct nlattr *nl_pats, *nl_pat;
                        int i, pat_len;
@@ -5970,12 +6051,13 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
                                if (!nl_pat)
                                        goto nla_put_failure;
                                pat_len = rdev->wowlan->patterns[i].pattern_len;
-                               NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
-                                       DIV_ROUND_UP(pat_len, 8),
-                                       rdev->wowlan->patterns[i].mask);
-                               NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-                                       pat_len,
-                                       rdev->wowlan->patterns[i].pattern);
+                               if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
+                                           DIV_ROUND_UP(pat_len, 8),
+                                           rdev->wowlan->patterns[i].mask) ||
+                                   nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+                                           pat_len,
+                                           rdev->wowlan->patterns[i].pattern))
+                                       goto nla_put_failure;
                                nla_nest_end(msg, nl_pat);
                        }
                        nla_nest_end(msg, nl_pats);
@@ -6248,7 +6330,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
        if (err)
                goto free_msg;
 
-       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+       if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -6916,19 +6999,24 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
        nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
        if (!nest)
                goto nla_put_failure;
-       for (i = 0; i < req->n_ssids; i++)
-               NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
+       for (i = 0; i < req->n_ssids; i++) {
+               if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
+                       goto nla_put_failure;
+       }
        nla_nest_end(msg, nest);
 
        nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
        if (!nest)
                goto nla_put_failure;
-       for (i = 0; i < req->n_channels; i++)
-               NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
+       for (i = 0; i < req->n_channels; i++) {
+               if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+                       goto nla_put_failure;
+       }
        nla_nest_end(msg, nest);
 
-       if (req->ie)
-               NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);
+       if (req->ie &&
+           nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
+               goto nla_put_failure;
 
        return 0;
  nla_put_failure:
@@ -6947,8 +7035,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
        if (!hdr)
                return -1;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+               goto nla_put_failure;
 
        /* ignore errors and send incomplete event anyway */
        nl80211_add_scan_req(msg, rdev);
@@ -6972,8 +7061,9 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
        if (!hdr)
                return -1;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+               goto nla_put_failure;
 
        return genlmsg_end(msg, hdr);
 
@@ -7096,26 +7186,33 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
        }
 
        /* Userspace can always count this one always being set */
-       NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator);
-
-       if (request->alpha2[0] == '0' && request->alpha2[1] == '0')
-               NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-                          NL80211_REGDOM_TYPE_WORLD);
-       else if (request->alpha2[0] == '9' && request->alpha2[1] == '9')
-               NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-                          NL80211_REGDOM_TYPE_CUSTOM_WORLD);
-       else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
-                request->intersect)
-               NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-                          NL80211_REGDOM_TYPE_INTERSECTION);
-       else {
-               NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-                          NL80211_REGDOM_TYPE_COUNTRY);
-               NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2);
-       }
-
-       if (wiphy_idx_valid(request->wiphy_idx))
-               NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
+       if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
+               goto nla_put_failure;
+
+       if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
+               if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+                              NL80211_REGDOM_TYPE_WORLD))
+                       goto nla_put_failure;
+       } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
+               if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+                              NL80211_REGDOM_TYPE_CUSTOM_WORLD))
+                       goto nla_put_failure;
+       } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
+                  request->intersect) {
+               if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+                              NL80211_REGDOM_TYPE_INTERSECTION))
+                       goto nla_put_failure;
+       } else {
+               if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+                              NL80211_REGDOM_TYPE_COUNTRY) ||
+                   nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+                                  request->alpha2))
+                       goto nla_put_failure;
+       }
+
+       if (wiphy_idx_valid(request->wiphy_idx) &&
+           nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7149,9 +7246,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7229,10 +7327,11 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7280,15 +7379,15 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       if (bssid)
-               NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
-       NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status);
-       if (req_ie)
-               NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
-       if (resp_ie)
-               NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+           nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
+           (req_ie &&
+            nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+           (resp_ie &&
+            nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7320,13 +7419,14 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
-       if (req_ie)
-               NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
-       if (resp_ie)
-               NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
+           (req_ie &&
+            nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+           (resp_ie &&
+            nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7357,14 +7457,14 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       if (from_ap && reason)
-               NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
-       if (from_ap)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
-       if (ie)
-               NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           (from_ap && reason &&
+            nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
+           (from_ap &&
+            nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
+           (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7395,9 +7495,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7428,11 +7529,12 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
-       if (ie_len && ie)
-               NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) ||
+           (ie_len && ie &&
+            nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7463,15 +7565,14 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       if (addr)
-               NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
-       NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-       if (key_id != -1)
-               NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
-       if (tsc)
-               NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) ||
+           nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) ||
+           (key_id != -1 &&
+            nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) ||
+           (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc)))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7506,7 +7607,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
         * Since we are applying the beacon hint to a wiphy we know its
         * wiphy_idx is valid
         */
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
+               goto nla_put_failure;
 
        /* Before */
        nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
@@ -7558,14 +7660,16 @@ static void nl80211_send_remain_on_chan_event(
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
-       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
+           nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+               goto nla_put_failure;
 
-       if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
-               NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
+       if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
+           nla_put_u32(msg, NL80211_ATTR_DURATION, duration))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7636,8 +7740,9 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7673,9 +7778,10 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                return true;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+               goto nla_put_failure;
 
        err = genlmsg_end(msg, hdr);
        if (err < 0) {
@@ -7724,12 +7830,13 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                return -ENOMEM;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-       if (sig_dbm)
-               NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
-       NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+           (sig_dbm &&
+            nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+           nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7759,12 +7866,12 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
-       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
-       if (ack)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+           nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+           (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
@@ -7796,15 +7903,17 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+               goto nla_put_failure;
 
        pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
        if (!pinfoattr)
                goto nla_put_failure;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
-                   rssi_event);
+       if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+                       rssi_event))
+               goto nla_put_failure;
 
        nla_nest_end(msg, pinfoattr);
 
@@ -7837,16 +7946,18 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+               goto nla_put_failure;
 
        rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
        if (!rekey_attr)
                goto nla_put_failure;
 
-       NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR,
-               NL80211_REPLAY_CTR_LEN, replay_ctr);
+       if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR,
+                   NL80211_REPLAY_CTR_LEN, replay_ctr))
+               goto nla_put_failure;
 
        nla_nest_end(msg, rekey_attr);
 
@@ -7879,17 +7990,19 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+               goto nla_put_failure;
 
        attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
        if (!attr)
                goto nla_put_failure;
 
-       NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
-       NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
-       if (preauth)
-               NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+       if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) ||
+           nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) ||
+           (preauth &&
+            nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH)))
+               goto nla_put_failure;
 
        nla_nest_end(msg, attr);
 
@@ -7923,15 +8036,17 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
+               goto nla_put_failure;
 
        pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
        if (!pinfoattr)
                goto nla_put_failure;
 
-       NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);
+       if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
+               goto nla_put_failure;
 
        nla_nest_end(msg, pinfoattr);
 
@@ -7965,12 +8080,12 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
-       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
-       if (acked)
-               NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+           nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+           (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
+               goto nla_put_failure;
 
        err = genlmsg_end(msg, hdr);
        if (err < 0) {
@@ -8010,12 +8125,13 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
                return;
        }
 
-       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-       if (freq)
-               NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-       if (sig_dbm)
-               NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
-       NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           (freq &&
+            nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
+           (sig_dbm &&
+            nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+           nla_put(msg, NL80211_ATTR_FRAME, len, frame))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
 
index af648e08e61b7f279dd99b8e87797148d746dd2a..22adfebaad273024e9c301dce118633e91ba5916 100644
@@ -402,7 +402,8 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
        r->ifi_flags = dev_get_flags(dev);
        r->ifi_change = 0;      /* Wireless changes don't affect those flags */
 
-       NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+       if (nla_put_string(skb, IFLA_IFNAME, dev->name))
+               goto nla_put_failure;
 
        return nlh;
  nla_put_failure:
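
Every hunk above applies the same conversion: the old NLA_PUT_* macros expanded to an unchecked attribute put with a hidden goto nla_put_failure, whereas the new code calls the nla_put_*() helpers directly and branches on their non-zero return, OR-chaining several puts so one goto covers the whole group. Optional attributes keep their guard by ANDing the condition with the put. A minimal sketch of the resulting shape, assuming hypothetical EX_ATTR_* attribute numbers and an ex_state struct that do not appear in the patch:

#include <net/netlink.h>

/* Hypothetical attribute ids and state, for illustration only. */
enum { EX_ATTR_UNSPEC, EX_ATTR_IFINDEX, EX_ATTR_ACTIVE, EX_ATTR_NESTED, EX_ATTR_DTIM };

struct ex_state {
        u32 ifindex;
        u8 dtim_period;
        bool active;
};

static int ex_fill_msg(struct sk_buff *msg, const struct ex_state *st)
{
        struct nlattr *nest;

        /* Unconditional and guarded puts share one error exit; the ||
         * chain stops at the first helper that reports failure. */
        if (nla_put_u32(msg, EX_ATTR_IFINDEX, st->ifindex) ||
            (st->active && nla_put_flag(msg, EX_ATTR_ACTIVE)))
                goto nla_put_failure;

        nest = nla_nest_start(msg, EX_ATTR_NESTED);
        if (!nest)
                goto nla_put_failure;
        if (nla_put_u8(msg, EX_ATTR_DTIM, st->dtim_period))
                goto nla_put_failure;
        nla_nest_end(msg, nest);

        return 0;

 nla_put_failure:
        return -EMSGSIZE;
}

The guard relies on C short-circuit evaluation: when st->active is false the nla_put_flag() call never runs, so an absent optional attribute can neither be emitted nor trip the error path.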
index 7128dde0fe1a85d7c62fb6d51f04c6eec10986e6..44293b3fd6a1d1d4dc2e6ae47f0cdb91b2226834 100644
@@ -756,40 +756,50 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
 {
        copy_to_user_state(x, p);
 
-       if (x->coaddr)
-               NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
+       if (x->coaddr &&
+           nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
+               goto nla_put_failure;
 
-       if (x->lastused)
-               NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
+       if (x->lastused &&
+           nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
+               goto nla_put_failure;
 
-       if (x->aead)
-               NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
-       if (x->aalg) {
-               if (copy_to_user_auth(x->aalg, skb))
-                       goto nla_put_failure;
+       if (x->aead &&
+           nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
+               goto nla_put_failure;
 
-               NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
-                       xfrm_alg_auth_len(x->aalg), x->aalg);
-       }
-       if (x->ealg)
-               NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
-       if (x->calg)
-               NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+       if (x->aalg &&
+           (copy_to_user_auth(x->aalg, skb) ||
+            nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
+                    xfrm_alg_auth_len(x->aalg), x->aalg)))
+               goto nla_put_failure;
 
-       if (x->encap)
-               NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+       if (x->ealg &&
+           nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
+               goto nla_put_failure;
 
-       if (x->tfcpad)
-               NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
+       if (x->calg &&
+           nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
+               goto nla_put_failure;
+
+       if (x->encap &&
+           nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
+               goto nla_put_failure;
+
+       if (x->tfcpad &&
+           nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
+               goto nla_put_failure;
 
        if (xfrm_mark_put(skb, &x->mark))
                goto nla_put_failure;
 
-       if (x->replay_esn)
-               NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
-                       xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
+       if (x->replay_esn &&
+           nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+                   xfrm_replay_state_esn_len(x->replay_esn),
+                   x->replay_esn))
+               goto nla_put_failure;
 
-       if (x->security && copy_sec_ctx(x->security, skb) < 0)
+       if (x->security && copy_sec_ctx(x->security, skb))
                goto nla_put_failure;
 
        return 0;
@@ -912,8 +922,9 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
        sph.spdhcnt = si.spdhcnt;
        sph.spdhmcnt = si.spdhmcnt;
 
-       NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
-       NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+       if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
+           nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
 
@@ -967,8 +978,9 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
        sh.sadhmcnt = si.sadhmcnt;
        sh.sadhcnt = si.sadhcnt;
 
-       NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
-       NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
+       if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
+           nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
 
@@ -1690,21 +1702,27 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
        id->reqid = x->props.reqid;
        id->flags = c->data.aevent;
 
-       if (x->replay_esn)
-               NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
-                       xfrm_replay_state_esn_len(x->replay_esn),
-                       x->replay_esn);
-       else
-               NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
-
-       NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+       if (x->replay_esn) {
+               if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+                           xfrm_replay_state_esn_len(x->replay_esn),
+                           x->replay_esn))
+                       goto nla_put_failure;
+       } else {
+               if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+                           &x->replay))
+                       goto nla_put_failure;
+       }
+       if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
+               goto nla_put_failure;
 
-       if (id->flags & XFRM_AE_RTHR)
-               NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
+       if ((id->flags & XFRM_AE_RTHR) &&
+           nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
+               goto nla_put_failure;
 
-       if (id->flags & XFRM_AE_ETHR)
-               NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
-                           x->replay_maxage * 10 / HZ);
+       if ((id->flags & XFRM_AE_ETHR) &&
+           nla_put_u32(skb, XFRMA_ETIMER_THRESH,
+                       x->replay_maxage * 10 / HZ))
+               goto nla_put_failure;
 
        if (xfrm_mark_put(skb, &x->mark))
                goto nla_put_failure;
@@ -2835,8 +2853,9 @@ static int build_report(struct sk_buff *skb, u8 proto,
        ur->proto = proto;
        memcpy(&ur->sel, sel, sizeof(ur->sel));
 
-       if (addr)
-               NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
+       if (addr &&
+           nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
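
The xfrm_user.c hunks repeat the same two shapes: an optional attribute is guarded by its presence test ANDed with the put, and helper results such as copy_sec_ctx() are now treated as plain non-zero failures instead of being compared with "< 0". A minimal sketch of the optional-attribute shape, assuming hypothetical EXA_* attribute numbers and an ex_copy_optional() helper that are not part of the patch:

#include <net/netlink.h>

/* Hypothetical attribute ids, for illustration only. */
enum { EXA_UNSPEC, EXA_COADDR, EXA_LASTUSED };

static int ex_copy_optional(struct sk_buff *skb, const void *coaddr, int len,
                            u64 lastused)
{
        /* Optional blob: emitted only when the pointer is set. */
        if (coaddr &&
            nla_put(skb, EXA_COADDR, len, coaddr))
                goto nla_put_failure;

        /* Optional scalar: emitted only when non-zero, as in the hunks above. */
        if (lastused &&
            nla_put_u64(skb, EXA_LASTUSED, lastused))
                goto nla_put_failure;

        return 0;

 nla_put_failure:
        return -EMSGSIZE;
}

As in the nl80211 sketch, the && short-circuits, so the put only executes (and can only fail) when the field is actually present in the state being dumped.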