Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
author    John W. Linville <linville@tuxdriver.com>
          Fri, 28 Jun 2013 17:18:21 +0000 (13:18 -0400)
committer John W. Linville <linville@tuxdriver.com>
          Fri, 28 Jun 2013 17:18:21 +0000 (13:18 -0400)
Conflicts:
net/wireless/nl80211.c

1971 files changed:
Documentation/bcache.txt
Documentation/devices.txt
Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/arc_emac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/davicom-dm9000.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/net/marvell-orion-net.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/micrel-ks8851.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/via-velocity.txt [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/video/exynos_hdmi.txt [moved from Documentation/devicetree/bindings/drm/exynos/hdmi.txt with 100% similarity]
Documentation/devicetree/bindings/video/exynos_hdmiddc.txt [moved from Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt with 100% similarity]
Documentation/devicetree/bindings/video/exynos_hdmiphy.txt [moved from Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt with 100% similarity]
Documentation/devicetree/bindings/video/exynos_mixer.txt [moved from Documentation/devicetree/bindings/drm/exynos/mixer.txt with 100% similarity]
Documentation/devicetree/bindings/video/simple-framebuffer.txt [new file with mode: 0644]
Documentation/devicetree/usage-model.txt
Documentation/dmatest.txt
Documentation/filesystems/xfs.txt
Documentation/kernel-parameters.txt
Documentation/kernel-per-CPU-kthreads.txt [new file with mode: 0644]
Documentation/m68k/kernel-options.txt
Documentation/networking/.gitignore
Documentation/networking/00-INDEX
Documentation/networking/Makefile
Documentation/networking/bonding.txt
Documentation/networking/ifenslave.c [deleted file]
Documentation/networking/ip-sysctl.txt
Documentation/networking/netlink_mmap.txt
Documentation/networking/packet_mmap.txt
Documentation/networking/scaling.txt
Documentation/power/devices.txt
Documentation/power/interface.txt
Documentation/power/notifiers.txt
Documentation/power/states.txt
Documentation/powerpc/transactional_memory.txt
Documentation/rapidio/rapidio.txt
Documentation/rapidio/sysfs.txt
Documentation/sysctl/net.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/uapi/asm/socket.h
arch/arc/boot/dts/abilis_tb100_dvk.dts
arch/arc/boot/dts/abilis_tb101_dvk.dts
arch/arc/boot/dts/abilis_tb10x.dtsi
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/tlb.h
arch/arc/mm/cache_arc700.c
arch/arc/mm/tlb.c
arch/arc/mm/tlbex.S
arch/arc/plat-tb10x/tb10x.c
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/debug.S
arch/arm/boot/compressed/head-sa1100.S
arch/arm/boot/compressed/head-shark.S
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone.dts
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/armada-370-xp.dtsi
arch/arm/boot/dts/armada-370.dtsi
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/armada-xp.dtsi
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x25ek.dts
arch/arm/boot/dts/bcm2835.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-sdp.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d3xcm.dtsi
arch/arm/boot/dts/ste-nomadik-s8815.dts
arch/arm/boot/dts/sun4i-a10-cubieboard.dts
arch/arm/boot/dts/sun4i-a10-hackberry.dts
arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/common/mcpm_platsmp.c
arch/arm/configs/exynos_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/tegra_defconfig
arch/arm/crypto/sha1-armv4-large.S
arch/arm/include/asm/cmpxchg.h
arch/arm/include/asm/percpu.h
arch/arm/include/asm/tlb.h
arch/arm/include/debug/ux500.S
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/kernel/topology.c
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/at91sam9n12.c
arch/arm/mach-at91/include/mach/at91_pmc.h
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/common.c
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/include/mach/pm-core.h
arch/arm/mach-exynos/mach-universal_c210.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/devices/Kconfig
arch/arm/mach-imx/headsmp.S
arch/arm/mach-imx/platsmp.c
arch/arm/mach-kirkwood/board-ts219.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/mpp.c
arch/arm/mach-kirkwood/ts219-setup.c
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-mvebu/armada-370-xp.c
arch/arm/mach-mvebu/coherency_ll.S
arch/arm/mach-mxs/Kconfig
arch/arm/mach-omap1/dma.c
arch/arm/mach-omap2/cclock33xx_data.c
arch/arm/mach-omap2/clock36xx.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_33xx_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/rstc.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-marzen.c
arch/arm/mach-shmobile/clock-r8a7740.c
arch/arm/mach-shmobile/clock-r8a7778.c
arch/arm/mach-shmobile/clock-r8a7779.c
arch/arm/mach-shmobile/setup-sh73a0.c
arch/arm/mach-sunxi/Kconfig
arch/arm/mach-tegra/tegra2_emc.c
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/board-mop500-regulators.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/cpuidle.c
arch/arm/mach-ux500/setup.h
arch/arm/mach-vt8500/vt8500.c
arch/arm/net/bpf_jit_32.c
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/uncompress.h
arch/arm/plat-samsung/pm.c
arch/arm/vfp/entry.S
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/early_printk.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/setup.c
arch/arm64/kernel/traps.c
arch/arm64/mm/cache.S
arch/arm64/mm/fault.c
arch/arm64/mm/proc.S
arch/avr32/Kconfig
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/numnodes.h [deleted file]
arch/avr32/include/asm/param.h [deleted file]
arch/avr32/include/uapi/asm/Kbuild
arch/avr32/include/uapi/asm/param.h [deleted file]
arch/avr32/include/uapi/asm/socket.h
arch/avr32/kernel/module.c
arch/cris/arch-v10/drivers/Kconfig
arch/cris/arch-v32/drivers/Kconfig
arch/cris/include/uapi/asm/socket.h
arch/frv/include/uapi/asm/socket.h
arch/h8300/include/uapi/asm/socket.h
arch/ia64/hp/sim/simeth.c
arch/ia64/include/asm/tlb.h
arch/ia64/include/uapi/asm/socket.h
arch/m32r/include/uapi/asm/socket.h
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/futex.h [new file with mode: 0644]
arch/m68k/include/asm/gpio.h
arch/m68k/kernel/head.S
arch/microblaze/include/asm/cacheflush.h
arch/microblaze/include/asm/futex.h
arch/microblaze/include/asm/io.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/kernel/cpu/cache.c
arch/mips/alchemy/board-gpr.c
arch/mips/alchemy/common/time.c
arch/mips/ath79/setup.c
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/dev-enet.c
arch/mips/cavium-octeon/setup.c
arch/mips/cobalt/reset.c
arch/mips/configs/db1000_defconfig
arch/mips/configs/db1235_defconfig
arch/mips/configs/lemote2f_defconfig
arch/mips/include/asm/clock.h
arch/mips/include/asm/idle.h [new file with mode: 0644]
arch/mips/include/asm/io.h
arch/mips/include/asm/kvm.h [deleted file]
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/page.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/ptrace.h
arch/mips/include/uapi/asm/kvm.h [new file with mode: 0644]
arch/mips/include/uapi/asm/ptrace.h
arch/mips/include/uapi/asm/socket.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/Makefile
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/crash_dump.c
arch/mips/kernel/ftrace.c
arch/mips/kernel/genex.S
arch/mips/kernel/idle.c [new file with mode: 0644]
arch/mips/kernel/kprobes.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/rtlx.c
arch/mips/kernel/scall64-64.S
arch/mips/kernel/smp.c
arch/mips/kernel/smtc.c
arch/mips/kernel/traps.c
arch/mips/kvm/kvm_mips.c
arch/mips/kvm/kvm_mips_emul.c
arch/mips/kvm/kvm_tlb.c
arch/mips/kvm/kvm_trap_emul.c
arch/mips/lantiq/xway/gptu.c
arch/mips/lib/dump_tlb.c
arch/mips/lib/r3k_dump_tlb.c
arch/mips/loongson/common/reset.c
arch/mips/loongson1/common/reset.c
arch/mips/mm/tlb-r3k.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlb-r8k.c
arch/mips/mm/tlbex.c
arch/mips/netlogic/xlp/setup.c
arch/mips/netlogic/xlr/setup.c
arch/mips/pmcs-msp71xx/msp_prom.c
arch/mips/pmcs-msp71xx/msp_setup.c
arch/mips/ralink/dts/rt3050.dtsi
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/of.c
arch/mips/txx9/generic/setup.c
arch/mips/txx9/generic/setup_tx4939.c
arch/mips/vr41xx/common/pmu.c
arch/mips/wrppmc/reset.c
arch/mn10300/include/asm/pci.h
arch/mn10300/include/uapi/asm/socket.h
arch/mn10300/kernel/entry.S
arch/mn10300/unit-asb2305/pci.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/hardirq.h
arch/parisc/include/asm/mmzone.h
arch/parisc/include/asm/processor.h
arch/parisc/include/uapi/asm/socket.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/hardware.c
arch/parisc/kernel/irq.c
arch/parisc/kernel/pacache.S
arch/parisc/kernel/setup.c
arch/parisc/kernel/traps.c
arch/parisc/kernel/unaligned.c
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/configs/ps3_defconfig
arch/powerpc/include/asm/context_tracking.h [new file with mode: 0644]
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/firmware.h
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/kvm_asm.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/pte-hash64-64k.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/include/asm/signal.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/tm.h
arch/powerpc/include/asm/udbg.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/include/uapi/asm/socket.h
arch/powerpc/include/uapi/asm/tm.h [new file with mode: 0644]
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/pci_dn.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal.h
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/udbg.c
arch/powerpc/kvm/44x_tlb.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/e500_mmu.c
arch/powerpc/kvm/e500mc.c
arch/powerpc/lib/copypage_power7.S
arch/powerpc/lib/copyuser_power7.S
arch/powerpc/mm/fault.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_64.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/powernv/Kconfig
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/powernv.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/platforms/pseries/suspend.c
arch/powerpc/platforms/wsp/ics.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/ehv_pic.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/udbg_memcons.c [new file with mode: 0644]
arch/powerpc/sysdev/xics/ics-opal.c
arch/s390/Kconfig
arch/s390/appldata/appldata_base.c
arch/s390/include/asm/dma-mapping.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/io.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pgtable.h
arch/s390/include/uapi/asm/socket.h
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/ftrace.c
arch/s390/kernel/irq.c
arch/s390/kernel/mcount.S
arch/s390/kernel/mcount64.S
arch/s390/kernel/sclp.S
arch/s390/kernel/smp.c
arch/s390/mm/pgtable.c
arch/s390/pci/pci.c
arch/score/mm/init.c
arch/sh/boards/board-espt.c
arch/sh/boards/board-sh7757lcr.c
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/boards/mach-se/770x/setup.c
arch/sh/boards/mach-se/7724/setup.c
arch/sh/boards/mach-sh7763rdp/setup.c
arch/sh/kernel/cpu/sh2/setup-sh7619.c
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
arch/sh/kernel/cpu/sh4a/clock-sh7734.c
arch/sparc/include/uapi/asm/socket.h
arch/sparc/kernel/prom_common.c
arch/sparc/net/bpf_jit_comp.c
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/crypto/crc32-pclmul_asm.S
arch/x86/crypto/sha256-avx-asm.S
arch/x86/crypto/sha256-ssse3-asm.S
arch/x86/include/asm/efi.h
arch/x86/include/asm/inst.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/i387.c
arch/x86/kernel/microcode_intel_early.c
arch/x86/kernel/process.c
arch/x86/kernel/relocate_kernel_64.S
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/mm/init.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/common.c
arch/x86/pci/mrst.c
arch/x86/platform/efi/efi.c
arch/x86/tools/relocs.c
arch/x86/xen/smp.c
arch/x86/xen/smp.h
arch/xtensa/include/uapi/asm/socket.h
block/blk-core.c
crypto/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_lpss.c
drivers/acpi/apei/cper.c
drivers/acpi/apei/ghes.c
drivers/acpi/csrt.c [deleted file]
drivers/acpi/device_pm.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/pci_root.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/acard-ahci.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/pata_ep93xx.c
drivers/ata/pdc_adma.c
drivers/ata/sata_promise.c
drivers/ata/sata_rcar.c
drivers/ata/sata_sil.c
drivers/ata/sata_sx4.c
drivers/ata/sata_via.c
drivers/atm/ambassador.c
drivers/base/bus.c
drivers/base/core.c
drivers/base/power/common.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-debugfs.c
drivers/block/brd.c
drivers/block/cciss.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/block/pktcdvd.c
drivers/block/rbd.c
drivers/block/xsysace.c
drivers/char/hw_random/mxc-rnga.c
drivers/char/hw_random/omap-rng.c
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/lp.c
drivers/char/random.c
drivers/char/ttyprintk.c
drivers/clk/clk-si5351.c
drivers/clk/clk-vt8500.c
drivers/clk/mxs/clk-imx28.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/tegra/clk-tegra20.c
drivers/clk/ux500/clk-sysctrl.c
drivers/clk/ux500/u8500_clk.c
drivers/clk/x86/clk-lpt.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/arm_big_little.h
drivers/cpufreq/arm_big_little_dt.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/caam/caamalg.c
drivers/crypto/nx/nx-aes-cbc.c
drivers/crypto/nx/nx-aes-ecb.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/nx/nx.c
drivers/crypto/sahara.c
drivers/dma/acpi-dma.c
drivers/dma/dmatest.c
drivers/dma/ste_dma40.c
drivers/dma/tegra20-apb-dma.c
drivers/edac/amd64_edac_inj.c
drivers/firmware/efi/efivars.c
drivers/gpio/Kconfig
drivers/gpio/gpio-langwell.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-pch.c
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-viperboard.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/gma500/cdv_intel_display.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/psb_intel_display.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/include/core/class.h
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/qxl/Kconfig
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r300_cmdbuf.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_family.h
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/tilcdc/Kconfig
drivers/gpu/host1x/drm/dc.c
drivers/hid/hid-multitouch.c
drivers/hv/channel_mgmt.c
drivers/hwmon/abituguru.c
drivers/hwmon/adm1021.c
drivers/hwmon/iio_hwmon.c
drivers/hwmon/nct6775.c
drivers/hwmon/tmp401.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sirf.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core.c
drivers/iio/adc/exynos_adc.c
drivers/iio/buffer_cb.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/dac/Kconfig
drivers/iio/frequency/adf4350.c
drivers/iio/inkern.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/qib/qib_keys.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/input/mouse/synaptics.c
drivers/input/tablet/wacom_wac.c
drivers/input/tablet/wacom_wac.h
drivers/input/touchscreen/egalax_ts.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/irq-versatile-fpga.c
drivers/irqchip/irq-vic.c
drivers/isdn/capi/kcapi.c
drivers/isdn/i4l/isdn_net.c
drivers/leds/leds-gpio.c
drivers/leds/leds-ot200.c
drivers/lguest/page_tables.c
drivers/md/bcache/Kconfig
drivers/md/bcache/bcache.h
drivers/md/bcache/stats.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/pci/zoran/zoran.h
drivers/media/pci/zoran/zoran_driver.c
drivers/media/platform/omap/omap_vout.c
drivers/memory/emif.c
drivers/mfd/Kconfig
drivers/mfd/ab8500-core.c
drivers/mfd/ab8500-debugfs.c
drivers/mfd/ab8500-gpadc.c
drivers/mfd/ab8500-sysctrl.c
drivers/mfd/abx500-core.c
drivers/mfd/cros_ec_spi.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/intel_msic.c
drivers/mfd/si476x-cmd.c
drivers/misc/atmel-ssc.c
drivers/misc/dummy-irq.c
drivers/misc/mei/bus.c
drivers/misc/mei/init.c
drivers/misc/mei/main.c
drivers/misc/mei/nfc.c
drivers/misc/mei/pci-me.c
drivers/misc/sgi-gru/grufile.c
drivers/misc/vmw_vmci/Kconfig
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-pci.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_3ad.h
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/caif/Kconfig
drivers/net/can/Kconfig
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/cc770_isa.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/flexcan.c
drivers/net/can/grcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/led.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/sja1000/sja1000_isa.c
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.h
drivers/net/can/usb/usb_8dev.c
drivers/net/ethernet/3com/3c509.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adaptec/Kconfig
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/allwinner/Kconfig [new file with mode: 0644]
drivers/net/ethernet/allwinner/Makefile [new file with mode: 0644]
drivers/net/ethernet/allwinner/sun4i-emac.c [new file with mode: 0644]
drivers/net/ethernet/allwinner/sun4i-emac.h [new file with mode: 0644]
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/sunlance.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/arc/Kconfig [new file with mode: 0644]
drivers/net/ethernet/arc/Makefile [new file with mode: 0644]
drivers/net/ethernet/arc/emac.h [new file with mode: 0644]
drivers/net/ethernet/arc/emac_main.c [new file with mode: 0644]
drivers/net/ethernet/arc/emac_mdio.c [new file with mode: 0644]
drivers/net/ethernet/atheros/Kconfig
drivers/net/ethernet/atheros/Makefile
drivers/net/ethernet/atheros/alx/Makefile [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/alx.h [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/ethtool.c [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/hw.c [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/hw.h [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/main.c [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/reg.h [new file with mode: 0644]
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcm63xx_enet.h
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bfa_defs.h
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfa_ioc.h
drivers/net/ethernet/brocade/bna/bna.h
drivers/net/ethernet/brocade/bna/bna_enet.c
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/brocade/bna/cna.h
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/at91_ether.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/davicom/Kconfig
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/Kconfig
drivers/net/ethernet/dec/tulip/interrupt.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/xircom_cb.c
drivers/net/ethernet/dlink/Kconfig
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/faraday/Kconfig
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/fs_enet/Kconfig
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/ibm/Kconfig
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/mal.c
drivers/net/ethernet/ibm/emac/rgmii.c
drivers/net/ethernet/ibm/emac/tah.c
drivers/net/ethernet/ibm/emac/zmii.c
drivers/net/ethernet/icplus/Kconfig
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/icplus/ipg.h
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/micrel/Kconfig
drivers/net/ethernet/micrel/ks8695net.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/netx-eth.c
drivers/net/ethernet/nuvoton/Kconfig
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/packetengines/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/rdc/Kconfig
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/Kconfig
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/s6gmac.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/filter.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sgi/Kconfig
drivers/net/ethernet/sgi/ioc3-eth.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/silan/sc92031.c
drivers/net/ethernet/sis/Kconfig
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/smsc/Kconfig
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunqe.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/ti/tlan.h
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/via/Kconfig
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/via/via-velocity.h
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xilinx/Kconfig
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/fddi/skfp/skfddi.c
drivers/net/hamradio/bpqether.c
drivers/net/hippi/rrunner.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/sh_irda.c
drivers/net/irda/sh_sir.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/netconsole.c
drivers/net/nlmon.c [new file with mode: 0644]
drivers/net/ntb_netdev.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/at803x.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-sun4i.c [new file with mode: 0644]
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/spi_ks8995.c
drivers/net/phy/vitesse.c
drivers/net/ppp/pppoe.c
drivers/net/team/team.c
drivers/net/team/team_mode_loadbalance.c
drivers/net/team/team_mode_random.c
drivers/net/team/team_mode_roundrobin.c
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/cdc_ether.c
drivers/net/usb/kalmia.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/dlci.c
drivers/net/wan/hdlc.c
drivers/net/wan/ixp4xx_hss.c
drivers/net/wan/lapbether.c
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/dfs_debug.c
drivers/net/wireless/ath/ath9k/htc_drv_debug.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/libertas/mesh.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/ntb/ntb_hw.c
drivers/ntb/ntb_transport.c
drivers/of/base.c
drivers/of/of_net.c
drivers/parisc/lba_pci.c
drivers/parisc/superio.c
drivers/parport/Kconfig
drivers/parport/parport_gsc.c
drivers/parport/parport_gsc.h
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/aer/aerdrv_errprint.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-abx500.c
drivers/pinctrl/pinctrl-coh901.c
drivers/pinctrl/pinctrl-exynos.c
drivers/pinctrl/pinctrl-exynos.h
drivers/pinctrl/pinctrl-exynos5440.c
drivers/pinctrl/pinctrl-lantiq.c
drivers/pinctrl/pinctrl-samsung.c
drivers/pinctrl/pinctrl-samsung.h
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-sunxi.c
drivers/pinctrl/pinctrl-xway.c
drivers/pinctrl/sh-pfc/pfc-r8a7779.c
drivers/pinctrl/vt8500/pinctrl-wm8750.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
drivers/platform/x86/hp-wmi.c
drivers/power/Kconfig
drivers/power/pm2301_charger.c
drivers/power/wm831x_backup.c
drivers/ptp/ptp_pch.c
drivers/pwm/pwm-imx.c
drivers/pwm/pwm-puv3.c
drivers/pwm/pwm-pxa.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/pwm-tipwmss.c
drivers/pwm/pwm-vt8500.c
drivers/rapidio/Kconfig
drivers/rapidio/Makefile
drivers/rapidio/devices/tsi721.c
drivers/rapidio/rio-driver.c
drivers/rapidio/rio-scan.c
drivers/rapidio/rio-sysfs.c
drivers/rapidio/rio.c
drivers/rapidio/rio.h
drivers/regulator/core.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/palmas-regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-max8998.c
drivers/rtc/rtc-nuc900.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-tegra.c
drivers/rtc/rtc-tps6586x.c
drivers/rtc/rtc-twl.c
drivers/s390/block/dasd.c
drivers/s390/block/xpram.c
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.h
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_proc.c
drivers/spi/spi-atmel.c
drivers/spi/spi-davinci.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-tegra20-sflash.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi-xilinx.c
drivers/spi/spi.c
drivers/ssb/sprom.c
drivers/staging/Kconfig
drivers/staging/android/alarm-dev.c
drivers/staging/android/logger.c
drivers/staging/android/logger.h
drivers/staging/comedi/Kconfig
drivers/staging/comedi/comedi_buf.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/ni_labpc.c
drivers/staging/comedi/drivers/ni_labpc.h
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/csr/netdev.c
drivers/staging/dwc2/Kconfig
drivers/staging/dwc2/hcd.c
drivers/staging/dwc2/hcd_intr.c
drivers/staging/dwc2/platform.c
drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
drivers/staging/gdm72xx/Kconfig
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/light/tsl2x7x_core.c
drivers/staging/imx-drm/Kconfig
drivers/staging/imx-drm/imx-tve.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/staging/media/solo6x10/Kconfig
drivers/staging/nvec/nvec.c
drivers/staging/nvec/nvec.h
drivers/staging/nvec/nvec_kbd.c
drivers/staging/nvec/nvec_power.c
drivers/staging/nvec/nvec_ps2.c
drivers/staging/rtl8192u/r8192U_core.c
drivers/staging/sep/Kconfig
drivers/staging/silicom/Kconfig
drivers/staging/silicom/bpctl_mod.c
drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
drivers/staging/vt6656/hostap.c
drivers/staging/vt6656/iwctl.c
drivers/staging/zcache/ramster.h
drivers/staging/zcache/ramster/debug.c
drivers/staging/zcache/ramster/ramster-howto.txt [new file with mode: 0644]
drivers/staging/zcache/ramster/ramster.c
drivers/staging/zcache/zcache-main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_parameters.h
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_rd.c
drivers/target/target_core_rd.h
drivers/target/target_core_transport.c
drivers/thermal/armada_thermal.c
drivers/thermal/dove_thermal.c
drivers/thermal/exynos_thermal.c
drivers/tty/ehv_bytechan.c
drivers/tty/mxser.c
drivers/tty/n_tty.c
drivers/tty/rocket.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/imx.c
drivers/tty/serial/mcf.c
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/nwpserial.c
drivers/tty/serial/omap-serial.c
drivers/tty/serial/samsung.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/uio/Kconfig
drivers/usb/atm/cxacru.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/ci13xxx_imx.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/udc.c
drivers/usb/core/Kconfig
drivers/usb/core/devio.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/bcm63xx_udc.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_ecm.c
drivers/usb/gadget/f_subset.c
drivers/usb/gadget/f_uac2.c
drivers/usb/gadget/fusb300_udc.c
drivers/usb/gadget/imx_udc.c
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/pxa25x_udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/gadget/zero.c
drivers/usb/host/Kconfig
drivers/usb/host/ehci-atmel.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-s5p.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-spear.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/isp1760-if.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-exynos.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-omap3.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/ohci-spear.c
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/uhci-hub.c
drivers/usb/host/uhci-platform.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/musb_host.h
drivers/usb/musb/omap2430.c
drivers/usb/phy/Kconfig
drivers/usb/phy/phy-ab8500-usb.c
drivers/usb/phy/phy-fsl-usb.c
drivers/usb/phy/phy-gpio-vbus-usb.c
drivers/usb/phy/phy-isp1301.c
drivers/usb/phy/phy-mv-u3d-usb.c
drivers/usb/phy/phy-mv-usb.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-nop.c
drivers/usb/phy/phy-samsung-usb2.c
drivers/usb/phy/phy-samsung-usb3.c
drivers/usb/serial/ark3116.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/cypress_m8.h
drivers/usb/serial/f81232.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/visor.c
drivers/usb/serial/whiteheat.c
drivers/usb/serial/zte_ev.c
drivers/usb/storage/realtek_cr.c
drivers/vfio/vfio.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vringh.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/atmel_lcdfb.c
drivers/video/console/Makefile
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/omap2/vrfb.c
drivers/video/ps3fb.c
drivers/video/simplefb.c [new file with mode: 0644]
drivers/w1/masters/omap_hdq.c
drivers/watchdog/ath79_wdt.c
drivers/watchdog/davinci_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/privcmd.c
drivers/xen/tmem.c
drivers/xen/xen-pciback/pci_stub.c
drivers/xen/xen-selfballoon.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_comms.h
drivers/xen/xenbus/xenbus_dev_backend.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe.h
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/aio.c
fs/befs/linuxvfs.c
fs/btrfs/backref.c
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-ref.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/raid56.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/super.h
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/dns_resolve.c
fs/cifs/inode.c
fs/ecryptfs/file.c
fs/efivarfs/file.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/extents_status.h
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
fs/fat/inode.c
fs/file_table.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/Kconfig
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/inode.c
fs/gfs2/lops.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/hfs/bnode.c
fs/hpfs/dir.c
fs/hpfs/file.c
fs/jfs/jfs_logmgr.c
fs/jfs/super.c
fs/namei.c
fs/ncpfs/dir.c
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nilfs2/inode.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/namei.c
fs/pnode.c
fs/proc/base.c
fs/proc/kmsg.c
fs/qnx6/dir.c
fs/reiserfs/dir.c
fs/reiserfs/inode.c
fs/reiserfs/xattr.c
fs/reiserfs/xattr_acl.c
fs/select.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_acl.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_leaf.h
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_attr_remote.h
fs/xfs/xfs_btree.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_dir2_format.h
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quota.h
fs/xfs/xfs_super.c
fs/xfs/xfs_symlink.c
fs/xfs/xfs_vnodeops.c
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/processor.h
include/asm-generic/io.h
include/asm-generic/kvm_para.h
include/asm-generic/tlb.h
include/drm/drmP.h
include/drm/drm_fb_helper.h
include/drm/drm_os_linux.h
include/drm/drm_pciids.h
include/linux/acpi_dma.h
include/linux/aer.h
include/linux/brcmphy.h
include/linux/cgroup.h
include/linux/cpu.h
include/linux/filter.h
include/linux/if_link.h
include/linux/if_macvlan.h
include/linux/if_team.h
include/linux/if_vlan.h
include/linux/igmp.h
include/linux/inetdevice.h
include/linux/journal-head.h
include/linux/kernel.h
include/linux/kref.h
include/linux/ktime.h
include/linux/list.h
include/linux/marvell_phy.h
include/linux/math64.h
include/linux/mfd/abx500/ab8500.h
include/linux/mlx4/cmd.h
include/linux/mlx4/qp.h
include/linux/mv643xx_eth.h
include/linux/net.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter_ipv6.h
include/linux/netlink.h
include/linux/netpoll.h
include/linux/of_platform.h
include/linux/pci-acpi.h
include/linux/phy.h
include/linux/pinctrl/pinconf-generic.h
include/linux/platform_data/clk-lpss.h
include/linux/platform_data/serial-omap.h
include/linux/printk.h
include/linux/rculist.h
include/linux/rculist_nulls.h
include/linux/rcupdate.h
include/linux/rio.h
include/linux/rio_drv.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/smp.h
include/linux/socket.h
include/linux/spi/spi.h
include/linux/swapops.h
include/linux/syslog.h
include/linux/tcp.h
include/linux/time.h
include/linux/tracepoint.h
include/linux/uio.h
include/linux/usb/gadget.h
include/linux/usb/serial.h
include/linux/vt_kern.h
include/linux/wait.h
include/net/act_api.h
include/net/addrconf.h
include/net/gen_stats.h
include/net/gre.h
include/net/if_inet6.h
include/net/inet_ecn.h
include/net/inet_sock.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/ll_poll.h [new file with mode: 0644]
include/net/ndisc.h
include/net/net_namespace.h
include/net/netfilter/nf_log.h
include/net/netfilter/nfnetlink_log.h
include/net/netfilter/xt_rateest.h
include/net/netns/x_tables.h
include/net/ping.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/net/transp_v6.h
include/net/udp.h
include/net/xfrm.h
include/sound/soc-dapm.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/trace/events/ext4.h
include/uapi/asm-generic/poll.h
include/uapi/asm-generic/socket.h
include/uapi/linux/ethtool.h
include/uapi/linux/gen_stats.h
include/uapi/linux/if_arp.h
include/uapi/linux/if_link.h
include/uapi/linux/if_tun.h
include/uapi/linux/kvm.h
include/uapi/linux/openvswitch.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/snmp.h
include/uapi/linux/tipc.h
include/uapi/linux/tipc_config.h
include/uapi/linux/virtio_console.h
include/video/omapdss.h
include/xen/interface/io/netif.h
include/xen/xenbus.h
init/Kconfig
ipc/sem.c
kernel/audit.c
kernel/audit_tree.c
kernel/auditfilter.c
kernel/cgroup.c
kernel/cpu.c
kernel/cpu/idle.c
kernel/events/core.c
kernel/exit.c
kernel/irq/irqdomain.c
kernel/kmod.c
kernel/module.c
kernel/printk.c
kernel/range.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/softirq.c
kernel/sys.c
kernel/time/Kconfig
kernel/time/ntp.c
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_selftest.c
kernel/workqueue.c
lib/Makefile
lib/iovec.c [new file with mode: 0644]
lib/klist.c
lib/mpi/longlong.h
lib/mpi/mpicoder.c
mm/frontswap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/pagewalk.c
mm/swap_state.c
mm/swapfile.c
net/802/mrp.c
net/8021q/vlan.c
net/9p/client.c
net/Kconfig
net/Makefile
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/clip.c
net/atm/mpc.c
net/ax25/af_ax25.c
net/ax25/sysctl_net_ax25.c
net/batman-adv/Makefile
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/hard-interface.c
net/batman-adv/icmp_socket.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/ring_buffer.c [deleted file]
net/batman-adv/ring_buffer.h [deleted file]
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c
net/batman-adv/vis.c
net/bridge/br_device.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_notify.c
net/bridge/br_private.h
net/bridge/br_sysfs_br.c
net/bridge/br_sysfs_if.c
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtables.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/can/af_can.c
net/can/bcm.c
net/can/gw.c
net/can/raw.c
net/ceph/osd_client.c
net/compat.c
net/core/datagram.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/drop_monitor.c
net/core/dst.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/gen_estimator.c
net/core/gen_stats.c
net/core/iovec.c
net/core/link_watch.c
net/core/neighbour.c
net/core/net-procfs.c
net/core/netpoll.c
net/core/netprio_cgroup.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/sysctl_net_core.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/sysctl_net_decnet.c
net/ieee802154/6lowpan.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/gre.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c [new file with mode: 0644]
net/ipv4/ip_vti.c
net/ipv4/ipcomp.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c [new file with mode: 0644]
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c [new file with mode: 0644]
net/ipv4/xfrm4_tunnel.c
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs_core.c
net/ipv6/icmp.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/mip6.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6t_MASQUERADE.c
net/ipv6/output_core.c
net/ipv6/ping.c [new file with mode: 0644]
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipv6/udp_offload.c
net/ipv6/udplite.c
net/ipv6/xfrm6_policy.c
net/ipx/af_ipx.c
net/irda/irlap_frame.c
net/irda/irsysctl.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_ppp.c
net/mac80211/iface.c
net/mpls/Kconfig [new file with mode: 0644]
net/mpls/Makefile [new file with mode: 0644]
net/mpls/mpls_gso.c [new file with mode: 0644]
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_log.c
net/netfilter/nf_nat_helper.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_CT.c
net/netfilter/xt_LOG.c
net/netfilter/xt_NFLOG.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_TEE.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_rateest.c
net/netfilter/xt_socket.c
net/netlabel/netlabel_domainhash.c
net/netlabel/netlabel_unlabeled.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netrom/af_netrom.c
net/netrom/sysctl_net_netrom.c
net/openvswitch/Kconfig
net/openvswitch/Makefile
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/dp_notify.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/vport-gre.c [new file with mode: 0644]
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport-netdev.h
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/phonet/pn_dev.c
net/phonet/sysctl.c
net/rds/ib_sysctl.c
net/rds/iw_sysctl.c
net/rds/sysctl.c
net/rose/af_rose.c
net/rose/sysctl_net_rose.c
net/sched/act_mirred.c
net/sched/act_police.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_qfq.c
net/sched/sch_tbf.c
net/sctp/associola.c
net/sctp/bind_addr.c
net/sctp/chunk.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/outqueue.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/netns.h
net/sunrpc/rpc_pipe.c
net/sunrpc/sched.c
net/sunrpc/svcauth_unix.c
net/sunrpc/sysctl.c
net/sunrpc/xprtrdma/svc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtsock.c
net/tipc/Makefile
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/eth_media.c
net/tipc/ib_media.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/node_subscr.c
net/tipc/port.c
net/tipc/port.h
net/tipc/server.c [new file with mode: 0644]
net/tipc/server.h [new file with mode: 0644]
net/tipc/socket.c
net/tipc/subscr.c
net/tipc/subscr.h
net/tipc/sysctl.c [new file with mode: 0644]
net/unix/sysctl_net_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/core.c
net/wireless/nl80211.c
net/x25/af_x25.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_proc.c
net/xfrm/xfrm_user.c
scripts/Makefile.lib
scripts/config
scripts/dtc/dtc-lexer.l
scripts/dtc/dtc-lexer.lex.c_shipped
scripts/dtc/dtc-parser.tab.c_shipped
scripts/dtc/dtc-parser.tab.h_shipped
scripts/kconfig/lxdialog/menubox.c
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/package/Makefile
security/selinux/netif.c
security/selinux/xfrm.c
sound/aoa/fabrics/layout.c
sound/aoa/soundbus/i2sbus/core.c
sound/core/pcm_native.c
sound/oss/Kconfig
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/sis7019.c
sound/soc/codecs/ab8500-codec.h
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l52.h
sound/soc/codecs/da7213.c
sound/soc/codecs/max98090.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm0010.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8994.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/imx-ssi.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/soc-compress.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/usb/6fire/firmware.c
sound/usb/mixer.c
sound/usb/proc.c
sound/usb/quirks-table.h
tools/perf/scripts/python/net_dropmonitor.py
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/Makefile
tools/testing/selftests/soft-dirty/Makefile [deleted file]
tools/testing/selftests/soft-dirty/soft-dirty.c [deleted file]

index 77db8809bd9648b94097be84e81677fb6008f460..b3a7e7d384f6583ede08ffec4957c6f40c45172b 100644 (file)
@@ -319,7 +319,10 @@ cache<0..n>
   Symlink to each of the cache devices comprising this cache set. 
 
 cache_available_percent
-  Percentage of cache device free.
+  Percentage of cache device which doesn't contain dirty data, and could
+  potentially be used for writeback.  This doesn't mean this space isn't used
+  for clean cached data; the unused statistic (in priority_stats) is typically
+  much lower.
 
 clear_stats
   Clears the statistics associated with this cache
@@ -423,8 +426,11 @@ nbuckets
   Total buckets in this cache
 
 priority_stats
-  Statistics about how recently data in the cache has been accessed.  This can
-  reveal your working set size.
+  Statistics about how recently data in the cache has been accessed.
+  This can reveal your working set size.  Unused is the percentage of
+  the cache that doesn't contain any data.  Metadata is bcache's
+  metadata overhead.  Average is the average priority of cache buckets.
+  Next is a list of quantiles with the priority threshold of each.
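 
 For illustration, the cache_available_percent and priority_stats files
 described above can be read straight from sysfs; a minimal shell sketch,
 assuming a registered cache set at /sys/fs/bcache/<uuid> whose first cache
 device is linked as cache0 (the uuid is a placeholder):
 
        UUID=<cache-set-uuid>     # substitute the real cache set uuid
        cat /sys/fs/bcache/$UUID/cache_available_percent
        cat /sys/fs/bcache/$UUID/cache0/priority_stats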
 
 written
   Sum of all data that has been written to the cache; comparison with
index 08f01e79c41a3e2f3902db5cc19e2ccaa34b382b..b9015912bca6bb6ea7a73ef4dc2a94f189346385 100644 (file)
@@ -498,12 +498,8 @@ Your cooperation is appreciated.
 
                Each device type has 5 bits (32 minors).
 
- 13 block      8-bit MFM/RLL/IDE controller
-                 0 = /dev/xda          First XT disk whole disk
-                64 = /dev/xdb          Second XT disk whole disk
-
-               Partitions are handled in the same way as IDE disks
-               (see major number 3).
+ 13 block      Previously used for the XT disk (/dev/xdN)
+               Deleted in kernel v3.9.
 
  14 char       Open Sound System (OSS)
                  0 = /dev/mixer        Mixer control
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
new file mode 100644 (file)
index 0000000..b90bfcd
--- /dev/null
@@ -0,0 +1,22 @@
+* Allwinner EMAC ethernet controller
+
+Required properties:
+- compatible: should be "allwinner,sun4i-emac".
+- reg: address and length of the register set for the device.
+- interrupts: interrupt for the device
+- phy: A phandle to a phy node defining the PHY address (as the reg
+  property, a single integer).
+- clocks: A phandle to the reference clock for this device
+
+Optional properties:
+- (local-)mac-address: mac address to be used by this driver
+
+Example:
+
+emac: ethernet@01c0b000 {
+       compatible = "allwinner,sun4i-emac";
+       reg = <0x01c0b000 0x1000>;
+       interrupts = <55>;
+       clocks = <&ahb_gates 17>;
+       phy = <&phy0>;
+};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
new file mode 100644 (file)
index 0000000..00b9f9a
--- /dev/null
@@ -0,0 +1,26 @@
+* Allwinner A10 MDIO Ethernet Controller interface
+
+Required properties:
+- compatible: should be "allwinner,sun4i-mdio".
+- reg: address and length of the register set for the device.
+
+Optional properties:
+- phy-supply: phandle to a regulator if the PHY needs one
+
+Example at the SoC level:
+mdio@01c0b080 {
+       compatible = "allwinner,sun4i-mdio";
+       reg = <0x01c0b080 0x14>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+};
+
+And at the board level:
+
+mdio@01c0b080 {
+       phy-supply = <&reg_emac_3v3>;
+
+       phy0: ethernet-phy@0 {
+               reg = <0>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/arc_emac.txt b/Documentation/devicetree/bindings/net/arc_emac.txt
new file mode 100644 (file)
index 0000000..bcbc3f0
--- /dev/null
@@ -0,0 +1,38 @@
+* Synopsys ARC EMAC 10/100 Ethernet driver (EMAC)
+
+Required properties:
+- compatible: Should be "snps,arc-emac"
+- reg: Address and length of the register set for the device
+- interrupts: Should contain the EMAC interrupts
+- clock-frequency: CPU frequency. It is needed to calculate and set the polling
+period of the EMAC.
+- max-speed: Maximum supported data-rate in Mbit/s. In some hardware
+configurations the bandwidth of the external memory controller might be a
+limiting factor, so the data-rate supported on the current SoC or FPGA must
+be specified. For example, if only 10 Mbit/s is supported (10BASE-T), set
+"10"; if 100 Mbit/s is supported (100BASE-TX), set "100".
+- phy: PHY device attached to the EMAC via MDIO bus
+
+Child nodes of the driver are the individual PHY devices connected to the
+MDIO bus. They must have a "reg" property giving the PHY address on the MDIO bus.
+
+Optional properties:
+- mac-address: 6 bytes, mac address
+
+Examples:
+
+       ethernet@c0fc2000 {
+               compatible = "snps,arc-emac";
+               reg = <0xc0fc2000 0x3c>;
+               interrupts = <6>;
+               mac-address = [ 00 11 22 33 44 55 ];
+               clock-frequency = <80000000>;
+               max-speed = <100>;
+               phy = <&phy0>;
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+               phy0: ethernet-phy@0 {
+                       reg = <1>;
+               };
+       };
index 4f2ca6b4a182d3f9584c72b05414deacb085aebf..05d660e4ac6402f57be201105b94147c69776b53 100644 (file)
@@ -28,6 +28,8 @@ Optional properties:
 Slave Properties:
 Required properties:
 - phy_id               : Specifies slave phy id
+- phy-mode             : The interface between the SoC and the PHY (a string
+                         that of_get_phy_mode() can understand)
 - mac-address          : Specifies slave MAC address
 
 Optional properties:
@@ -58,11 +60,13 @@ Examples:
                cpts_clock_shift = <29>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
+                       phy-mode = "rgmii-txid";
                        /* Filled in by U-Boot */
                        mac-address = [ 00 00 00 00 00 00 ];
                };
                cpsw_emac1: slave@1 {
                        phy_id = <&davinci_mdio>, <1>;
+                       phy-mode = "rgmii-txid";
                        /* Filled in by U-Boot */
                        mac-address = [ 00 00 00 00 00 00 ];
                };
@@ -84,11 +88,13 @@ Examples:
                cpts_clock_shift = <29>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
+                       phy-mode = "rgmii-txid";
                        /* Filled in by U-Boot */
                        mac-address = [ 00 00 00 00 00 00 ];
                };
                cpsw_emac1: slave@1 {
                        phy_id = <&davinci_mdio>, <1>;
+                       phy-mode = "rgmii-txid";
                        /* Filled in by U-Boot */
                        mac-address = [ 00 00 00 00 00 00 ];
                };
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
new file mode 100644 (file)
index 0000000..2d39c99
--- /dev/null
@@ -0,0 +1,26 @@
+Davicom DM9000 Fast Ethernet controller
+
+Required properties:
+- compatible = "davicom,dm9000";
+- reg : physical addresses and sizes of registers, must contain 2 entries:
+    first entry : address register,
+    second entry : data register.
+- interrupt-parent : interrupt controller to which the device is connected
+- interrupts : interrupt specifier specific to interrupt controller
+
+Optional properties:
+- local-mac-address : A bytestring of 6 bytes specifying Ethernet MAC address
+    to use (from firmware or bootloader)
+- davicom,no-eeprom : Configuration EEPROM is not available
+- davicom,ext-phy : Use external PHY
+
+Example:
+
+       ethernet@18000000 {
+               compatible = "davicom,dm9000";
+               reg = <0x18000000 0x2 0x18000004 0x2>;
+               interrupt-parent = <&gpn>;
+               interrupts = <7 4>;
+               local-mac-address = [00 00 de ad be ef];
+               davicom,no-eeprom;
+       };
index 44afa0e5057d1580698fa81462925d87a9896856..4ff65047bb9a8c63e3e647331fe2f32e1551db56 100644 (file)
@@ -4,7 +4,7 @@ Required properties:
 - compatible: Should be "cdns,[<chip>-]{macb|gem}"
   Use "cdns,at91sam9260-macb" for Atmel at91sam9260 and at91sam9263 SoCs.
   Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
-  Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+  Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
new file mode 100644 (file)
index 0000000..a73b79f
--- /dev/null
@@ -0,0 +1,85 @@
+Marvell Orion/Discovery ethernet controller
+=============================================
+
+The Marvell Discovery ethernet controller can be found on Marvell Orion SoCs
+(Kirkwood, Dove, Orion5x, and Discovery Innovation) and as part of Marvell
+Discovery system controller chips (mv64[345]60).
+
+The Discovery ethernet controller is described with two levels of nodes. The
+first level describes the ethernet controller itself and the second level
+describes up to 3 ethernet port nodes within that controller. The reason for
+the multiple levels is that the port registers are interleaved within a single
+set of controller registers. Each port node describes port-specific properties.
+
+Note: The above separation is only true for Discovery system controllers.
+For Orion SoCs we stick to the separation, although each controller there has
+only one associated port. Multiple ports are implemented as multiple
+single-port controllers. As Kirkwood has some issues with proper
+initialization after reset, an extra compatible string is added for it.
+
+* Ethernet controller node
+
+Required controller properties:
+ - #address-cells: shall be 1.
+ - #size-cells: shall be 0.
+ - compatible: shall be one of "marvell,orion-eth", "marvell,kirkwood-eth".
+ - reg: address and length of the controller registers.
+
+Optional controller properties:
+ - clocks: phandle reference to the controller clock.
+ - marvell,tx-checksum-limit: max tx packet size for hardware checksum.
+
+* Ethernet port node
+
+Required port properties:
+ - device_type: shall be "network".
+ - compatible: shall be one of "marvell,orion-eth-port",
+      "marvell,kirkwood-eth-port".
+ - reg: port number relative to ethernet controller, shall be 0, 1, or 2.
+ - interrupts: port interrupt.
+ - local-mac-address: 6 bytes MAC address.
+
+Optional port properties:
+ - marvell,tx-queue-size: size of the transmit ring buffer.
+ - marvell,tx-sram-addr: address of transmit descriptor buffer located in SRAM.
+ - marvell,tx-sram-size: size of transmit descriptor buffer located in SRAM.
+ - marvell,rx-queue-size: size of the receive ring buffer.
+ - marvell,rx-sram-addr: address of receive descriptor buffer located in SRAM.
+ - marvell,rx-sram-size: size of receive descriptor buffer located in SRAM.
+
+and
+
+ - phy-handle: phandle reference to ethernet PHY.
+
+or
+
+ - speed: port speed if no PHY connected.
+ - duplex: port mode if no PHY connected.
+
+* Node example:
+
+mdio-bus {
+       ...
+       ethphy: ethernet-phy@8 {
+               device_type = "ethernet-phy";
+               ...
+       };
+};
+
+eth: ethernet-controller@72000 {
+       compatible = "marvell,orion-eth";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       reg = <0x72000 0x2000>;
+       clocks = <&gate_clk 2>;
+       marvell,tx-checksum-limit = <1600>;
+
+       ethernet@0 {
+               device_type = "network";
+               compatible = "marvell,orion-eth-port";
+               reg = <0>;
+               interrupts = <29>;
+               phy-handle = <&ethphy>;
+               local-mac-address = [00 00 00 00 00 00];
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
new file mode 100644 (file)
index 0000000..11ace3c
--- /dev/null
@@ -0,0 +1,9 @@
+Micrel KS8851 Ethernet mac
+
+Required properties:
+- compatible = "micrel,ks8851-ml" for the parallel interface
+- reg : two register ranges (physical address and size), the first for data
+  and the second for command
+- interrupts : interrupt connection
+
+Optional properties:
+- local-mac-address : Ethernet mac address to use
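 
 For illustration, a hypothetical node using this binding (the addresses,
 sizes and interrupt specifier below are placeholders, not taken from real
 hardware):
 
        ethernet@20000000 {
                compatible = "micrel,ks8851-ml";
                reg = <0x20000000 0x2 0x20000004 0x2>;
                interrupt-parent = <&gpio1>;
                interrupts = <4 8>;
                local-mac-address = [00 11 22 33 44 55];
        };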
diff --git a/Documentation/devicetree/bindings/net/via-velocity.txt b/Documentation/devicetree/bindings/net/via-velocity.txt
new file mode 100644 (file)
index 0000000..b3db469
--- /dev/null
@@ -0,0 +1,20 @@
+* VIA Velocity 10/100/1000 Network Controller
+
+Required properties:
+- compatible : Should be "via,velocity-vt6110"
+- reg : Address and length of the io space
+- interrupts : Should contain the controller interrupt line
+
+Optional properties:
+- no-eeprom : PCI network cards use an external EEPROM to store data. Embedded
+       devices quite often set this data in U-Boot and do not provide an EEPROM.
+       Specify this option if you have no external EEPROM.
+
+Examples:
+
+eth0@d8004000 {
+       compatible = "via,velocity-vt6110";
+       reg = <0xd8004000 0x400>;
+       interrupts = <10>;
+       no-eeprom;
+};
index 2a3feabd3b224e0a75a2a266cb822052163f9620..34c1505774bfc46fd27ce318aa041fd66dc3a321 100644 (file)
@@ -1,7 +1,7 @@
 Atmel AT91RM9200 Real Time Clock
 
 Required properties:
-- compatible: should be: "atmel,at91rm9200-rtc"
+- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
 - reg: physical base address of the controller and length of memory mapped
   region.
 - interrupts: rtc alarm/event interrupt
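 
 A hypothetical node accepted by the extended binding (the register window
 and interrupt specifier are illustrative only):
 
        rtc@fffffeb0 {
                compatible = "atmel,at91sam9x5-rtc";
                reg = <0xfffffeb0 0x40>;
                interrupts = <1 4 7>;
        };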
index 6931c4348d240ed9f8bf6b21a0d75f9c520edf1d..2fe74e6ec209500b593280873b5f82ea1b68b150 100644 (file)
@@ -18,6 +18,7 @@ chrp  Common Hardware Reference Platform
 cirrus Cirrus Logic, Inc.
 cortina        Cortina Systems, Inc.
 dallas Maxim Integrated Products (formerly Dallas Semiconductor)
+davicom        DAVICOM Semiconductor, Inc.
 denx   Denx Software Engineering
 emmicro        EM Microelectronic
 epson  Seiko Epson Corp.
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
new file mode 100644 (file)
index 0000000..3ea4605
--- /dev/null
@@ -0,0 +1,25 @@
+Simple Framebuffer
+
+A simple frame-buffer describes a raw memory region that may be rendered to,
+with the assumption that the display hardware has already been set up to scan
+out from that buffer.
+
+Required properties:
+- compatible: "simple-framebuffer"
+- reg: Should contain the location and size of the framebuffer memory.
+- width: The width of the framebuffer in pixels.
+- height: The height of the framebuffer in pixels.
+- stride: The number of bytes in each line of the framebuffer.
+- format: The format of the framebuffer surface. Valid values are:
+  - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
+
+Example:
+
+       framebuffer {
+               compatible = "simple-framebuffer";
+               reg = <0x1d385000 (1600 * 1200 * 2)>;
+               width = <1600>;
+               height = <1200>;
+               stride = <(1600 * 2)>;
+               format = "r5g6b5";
+       };
index ef9d06c9f8fde9341c3dc1926f83524fb7a47396..0efedaad5165d74a1ef8273fe1ff9491c096d97a 100644 (file)
@@ -191,9 +191,11 @@ Linux it will look something like this:
        };
 
 The bootargs property contains the kernel arguments, and the initrd-*
-properties define the address and size of an initrd blob.  The
-chosen node may also optionally contain an arbitrary number of
-additional properties for platform-specific configuration data.
+properties define the address and size of an initrd blob.  Note that
+initrd-end is the first address after the initrd image, so this doesn't
+match the usual semantic of struct resource.  The chosen node may also
+optionally contain an arbitrary number of additional properties for
+platform-specific configuration data.
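 
 Because initrd-end is the first address after the image, the size is simply
 end minus start; a struct resource, by contrast, stores the last valid
 address in .end.  A minimal C sketch of the distinction (the addresses are
 made up for illustration):
 
        #include <stdint.h>
        #include <stdio.h>
 
        int main(void)
        {
                uint64_t initrd_start = 0x48000000;   /* hypothetical */
                uint64_t initrd_end   = 0x48400000;   /* first byte past the image */
 
                uint64_t size         = initrd_end - initrd_start;  /* no +1 needed */
                uint64_t resource_end = initrd_end - 1;             /* inclusive end */
 
                printf("size=%llu bytes, resource end=%#llx\n",
                       (unsigned long long)size, (unsigned long long)resource_end);
                return 0;
        }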
 
 During early boot, the architecture setup code calls of_scan_flat_dt()
 several times with different helper callbacks to parse device tree
index 279ac0a8c5b11dd12305e8ce8f0a15ce8a3aaaef..132a094c7bc3790631b664efb20b6cfb21a3d42e 100644 (file)
@@ -34,7 +34,7 @@ command:
 After a while you will start to get messages about current status or error like
 in the original code.
 
-Note that running a new test will stop any in progress test.
+Note that running a new test will not stop any in progress test.
 
 The following command should return actual state of the test.
        % cat /sys/kernel/debug/dmatest/run
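 
 A minimal shell sketch of waiting for a test to finish by polling this file,
 assuming it reports "Y" while a test is in flight and "N" otherwise:
 
        % cd /sys/kernel/debug/dmatest
        % echo 1 > run                   # start a test
        % while [ "$(cat run)" = "Y" ]; do sleep 1; done
        % echo "test finished"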
@@ -52,8 +52,8 @@ To wait for test done the user may perform a busy loop that checks the state.
 
 The module parameters that are supplied on the kernel command line will be used
 for the first performed test. After the user gets control, the test can be
-interrupted or re-run with same or different parameters. For the details see
-the above section "Part 2 - When dmatest is built as a module..."
+re-run with the same or different parameters. For details, see the above
+section "Part 2 - When dmatest is built as a module..."
 
 In both cases the module parameters are used as initial values for the test case.
 You can always check them at run-time by running
index 3e4b3dd1e046a76b02c45f91c8779ae82e402d1f..83577f0232a0676e479bc6280ce898517a746ec8 100644 (file)
@@ -33,6 +33,9 @@ When mounting an XFS filesystem, the following options are accepted.
        removing extended attributes) the on-disk superblock feature
        bit field will be updated to reflect this format being in use.
 
+       CRC enabled filesystems always use the attr2 format, and so
+       will reject the noattr2 mount option if it is set.
+
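 
        As a sketch of the noattr2 interaction described above (device and
        mount point are placeholders; a v5, CRC-enabled filesystem is
        expected to refuse the option):
 
                # mkfs.xfs -m crc=1 /dev/sdb1
                # mount -o noattr2 /dev/sdb1 /mnt   # refused on a CRC filesystem
                # mount /dev/sdb1 /mnt              # mounts fine without noattr2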
   barrier
        Enables the use of block layer write barriers for writes into
        the journal and unwritten extent conversion.  This allows for
index c3bfacb92910dc8d044bec88122c4a85328ed8d5..2fe6e767b3d6013f3d1023c2e518b0466fa27e2f 100644 (file)
@@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Force threading of all interrupt handlers except those
                        marked explicitly IRQF_NO_THREAD.
 
+       tmem            [KNL,XEN]
+                       Enable the Transcendent memory driver if built-in.
+
+       tmem.cleancache=0|1 [KNL, XEN]
+                       Default is on (1). Disable the usage of the cleancache
+                       API to send clean page cache pages to the hypervisor.
+
+       tmem.frontswap=0|1 [KNL, XEN]
+                       Default is on (1). Disable the usage of the frontswap
+                       API to send swap pages to the hypervisor. If disabled
+                       the selfballooning and selfshrinking are force disabled.
+
+       tmem.selfballooning=0|1 [KNL, XEN]
+                       Default is on (1). Disable the driving of swap pages
+                       to the hypervisor.
+
+       tmem.selfshrinking=0|1 [KNL, XEN]
+                       Default is on (1). Disable the partial swapoff
+                       mechanism that immediately transfers pages from the
+                       Xen hypervisor back to the kernel based on different
+                       criteria.
+
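 
        As an illustration of the tmem parameters above, a hypothetical Xen
        guest command-line fragment that keeps tmem enabled but turns off
        frontswap (which also force-disables selfballooning and
        selfshrinking):
 
                tmem tmem.cleancache=1 tmem.frontswap=0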
        topology=       [S390]
                        Format: {off | on}
                        Specify if the kernel should make use of the cpu
@@ -3330,9 +3351,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        plus one apbt timer for broadcast timer.
                        x86_mrst_timer=apbt_only | lapic_and_apbt
 
-       xd=             [HW,XT] Original XT pre-IDE (RLL encoded) disks.
-       xd_geo=         See header of drivers/block/xd.c.
-
        xen_emul_unplug=                [HW,X86,XEN]
                        Unplug Xen emulated devices
                        Format: [unplug0,][unplug1]
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
new file mode 100644 (file)
index 0000000..cbf7ae4
--- /dev/null
@@ -0,0 +1,202 @@
+REDUCING OS JITTER DUE TO PER-CPU KTHREADS
+
+This document lists per-CPU kthreads in the Linux kernel and presents
+options to control their OS jitter.  Note that non-per-CPU kthreads are
+not listed here.  To reduce OS jitter from non-per-CPU kthreads, bind
+them to a "housekeeping" CPU dedicated to such work.
+
+
+REFERENCES
+
+o      Documentation/IRQ-affinity.txt:  Binding interrupts to sets of CPUs.
+
+o      Documentation/cgroups:  Using cgroups to bind tasks to sets of CPUs.
+
+o      man taskset:  Using the taskset command to bind tasks to sets
+       of CPUs.
+
+o      man sched_setaffinity:  Using the sched_setaffinity() system
+       call to bind tasks to sets of CPUs (a minimal C sketch follows
+       this list).
+
+o      /sys/devices/system/cpu/cpuN/online:  Control CPU N's hotplug state,
+       writing "0" to offline and "1" to online.
+
+o      In order to locate kernel-generated OS jitter on CPU N:
+
+               cd /sys/kernel/debug/tracing
+               echo 1 > max_graph_depth # Increase the "1" for more detail
+               echo function_graph > current_tracer
+               # run workload
+               cat per_cpu/cpuN/trace
+
+
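 
 As a companion to the sched_setaffinity() reference above, a minimal C
 sketch that binds the calling task to CPU 0 (the CPU number is purely
 illustrative):
 
        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>
 
        int main(void)
        {
                cpu_set_t set;
 
                CPU_ZERO(&set);
                CPU_SET(0, &set);               /* allow only CPU 0 */
                if (sched_setaffinity(0, sizeof(set), &set) != 0) {
                        perror("sched_setaffinity");
                        return 1;
                }
                printf("bound to CPU 0\n");
                return 0;
        }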
+KTHREADS
+
+Name: ehca_comp/%u
+Purpose: Periodically process Infiniband-related work.
+To reduce its OS jitter, do any of the following:
+1.     Don't use eHCA Infiniband hardware, instead choosing hardware
+       that does not require per-CPU kthreads.  This will prevent these
+       kthreads from being created in the first place.  (This will
+       work for most people, as this hardware, though important, is
+       relatively old and is produced in relatively low unit volumes.)
+2.     Do all eHCA-Infiniband-related work on other CPUs, including
+       interrupts.
+3.     Rework the eHCA driver so that its per-CPU kthreads are
+       provisioned only on selected CPUs.
+
+
+Name: irq/%d-%s
+Purpose: Handle threaded interrupts.
+To reduce its OS jitter, do the following:
+1.     Use irq affinity to force the irq threads to execute on
+       some other CPU.
+
+Name: kcmtpd_ctr_%d
+Purpose: Handle Bluetooth work.
+To reduce its OS jitter, do one of the following:
+1.     Don't use Bluetooth, in which case these kthreads won't be
+       created in the first place.
+2.     Use irq affinity to force Bluetooth-related interrupts to
+       occur on some other CPU and furthermore initiate all
+       Bluetooth activity on some other CPU.
+
+Name: ksoftirqd/%u
+Purpose: Execute softirq handlers when threaded or when under heavy load.
+To reduce its OS jitter, each softirq vector must be handled
+separately as follows:
+TIMER_SOFTIRQ:  Do all of the following:
+1.     To the extent possible, keep the CPU out of the kernel when it
+       is non-idle, for example, by avoiding system calls and by forcing
+       both kernel threads and interrupts to execute elsewhere.
+2.     Build with CONFIG_HOTPLUG_CPU=y.  After boot completes, force
+       the CPU offline, then bring it back online.  This forces
+       recurring timers to migrate elsewhere.  If you are concerned
+       with multiple CPUs, force them all offline before bringing the
+       first one back online.  Once you have onlined the CPUs in question,
+       do not offline any other CPUs, because doing so could force the
+       timer back onto one of the CPUs in question.
+NET_TX_SOFTIRQ and NET_RX_SOFTIRQ:  Do all of the following:
+1.     Force networking interrupts onto other CPUs.
+2.     Initiate any network I/O on other CPUs.
+3.     Once your application has started, prevent CPU-hotplug operations
+       from being initiated from tasks that might run on the CPU to
+       be de-jittered.  (It is OK to force this CPU offline and then
+       bring it back online before you start your application.)
+BLOCK_SOFTIRQ:  Do all of the following:
+1.     Force block-device interrupts onto some other CPU.
+2.     Initiate any block I/O on other CPUs.
+3.     Once your application has started, prevent CPU-hotplug operations
+       from being initiated from tasks that might run on the CPU to
+       be de-jittered.  (It is OK to force this CPU offline and then
+       bring it back online before you start your application.)
+BLOCK_IOPOLL_SOFTIRQ:  Do all of the following:
+1.     Force block-device interrupts onto some other CPU.
+2.     Initiate any block I/O and block-I/O polling on other CPUs.
+3.     Once your application has started, prevent CPU-hotplug operations
+       from being initiated from tasks that might run on the CPU to
+       be de-jittered.  (It is OK to force this CPU offline and then
+       bring it back online before you start your application.)
+TASKLET_SOFTIRQ: Do one or more of the following:
+1.     Avoid use of drivers that use tasklets.  (Such drivers will contain
+       calls to things like tasklet_schedule().)
+2.     Convert all drivers that you must use from tasklets to workqueues.
+3.     Force interrupts for drivers using tasklets onto other CPUs,
+       and also do I/O involving these drivers on other CPUs.
+SCHED_SOFTIRQ: Do all of the following:
+1.     Avoid sending scheduler IPIs to the CPU to be de-jittered,
+       for example, ensure that at most one runnable kthread is present
+       on that CPU.  If a thread that expects to run on the de-jittered
+       CPU awakens, the scheduler will send an IPI that can result in
+       a subsequent SCHED_SOFTIRQ.
+2.     Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+       CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+       to be de-jittered is marked as an adaptive-ticks CPU using the
+       "nohz_full=" boot parameter.  This reduces the number of
+       scheduler-clock interrupts that the de-jittered CPU receives,
+       minimizing its chances of being selected to do the load balancing
+       work that runs in SCHED_SOFTIRQ context.
+3.     To the extent possible, keep the CPU out of the kernel when it
+       is non-idle, for example, by avoiding system calls and by
+       forcing both kernel threads and interrupts to execute elsewhere.
+       This further reduces the number of scheduler-clock interrupts
+       received by the de-jittered CPU.
+HRTIMER_SOFTIRQ:  Do all of the following:
+1.     To the extent possible, keep the CPU out of the kernel when it
+       is non-idle.  For example, avoid system calls and force both
+       kernel threads and interrupts to execute elsewhere.
+2.     Build with CONFIG_HOTPLUG_CPU=y.  Once boot completes, force the
+       CPU offline, then bring it back online.  This forces recurring
+       timers to migrate elsewhere.  If you are concerned with multiple
+       CPUs, force them all offline before bringing the first one
+       back online.  Once you have onlined the CPUs in question, do not
+       offline any other CPUs, because doing so could force the timer
+       back onto one of the CPUs in question.
+RCU_SOFTIRQ:  Do at least one of the following:
+1.     Offload callbacks and keep the CPU in either dyntick-idle or
+       adaptive-ticks state by doing all of the following:
+       a.      Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+               CONFIG_NO_HZ_FULL=y, and, in addition ensure that the CPU
+               to be de-jittered is marked as an adaptive-ticks CPU using
+               the "nohz_full=" boot parameter.  Bind the rcuo kthreads
+               to housekeeping CPUs, which can tolerate OS jitter.
+       b.      To the extent possible, keep the CPU out of the kernel
+               when it is non-idle, for example, by avoiding system
+               calls and by forcing both kernel threads and interrupts
+               to execute elsewhere.
+2.     Enable RCU to do its processing remotely via dyntick-idle by
+       doing all of the following:
+       a.      Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
+       b.      Ensure that the CPU goes idle frequently, allowing other
+               CPUs to detect that it has passed through an RCU quiescent
+               state.  If the kernel is built with CONFIG_NO_HZ_FULL=y,
+               userspace execution also allows other CPUs to detect that
+               the CPU in question has passed through a quiescent state.
+       c.      To the extent possible, keep the CPU out of the kernel
+               when it is non-idle, for example, by avoiding system
+               calls and by forcing both kernel threads and interrupts
+               to execute elsewhere.
+
+Name: rcuc/%u
+Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+To reduce its OS jitter, do at least one of the following:
+1.     Build the kernel with CONFIG_PREEMPT=n.  This prevents these
+       kthreads from being created in the first place, and also obviates
+       the need for RCU priority boosting.  This approach is feasible
+       for workloads that do not require high degrees of responsiveness.
+2.     Build the kernel with CONFIG_RCU_BOOST=n.  This prevents these
+       kthreads from being created in the first place.  This approach
+       is feasible only if your workload never requires RCU priority
+       boosting, for example, if you ensure frequent idle time on all
+       CPUs that might execute within the kernel.
+3.     Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
+       which offloads all RCU callbacks to kthreads that can be moved
+       off of CPUs susceptible to OS jitter.  This approach prevents the
+       rcuc/%u kthreads from having any work to do, so that they are
+       never awakened.
+4.     Ensure that the CPU never enters the kernel, and, in particular,
+       avoid initiating any CPU hotplug operations on this CPU.  This is
+       another way of preventing any callbacks from being queued on the
+       CPU, again preventing the rcuc/%u kthreads from having any work
+       to do.
+
+Name: rcuob/%d, rcuop/%d, and rcuos/%d
+Purpose: Offload RCU callbacks from the corresponding CPU.
+To reduce its OS jitter, do at least one of the following:
+1.     Use affinity, cgroups, or other mechanism to force these kthreads
+       to execute on some other CPU.
+2.     Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these
+       kthreads from being created in the first place.  However, please
+       note that this will not eliminate OS jitter, but will instead
+       shift it to RCU_SOFTIRQ.
+
+Name: watchdog/%u
+Purpose: Detect software lockups on each CPU.
+To reduce its OS jitter, do at least one of the following:
+1.     Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
+       kthreads from being created in the first place.
+2.     Echo a zero to /proc/sys/kernel/watchdog to disable the
+       watchdog timer.
+3.     Echo a large number to /proc/sys/kernel/watchdog_thresh in
+       order to reduce the frequency of OS jitter due to the watchdog
+       timer down to a level that is acceptable for your workload.
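 
 Pulling several of the knobs above together, a minimal shell sketch for
 de-jittering CPU 3 (the CPU number, the IRQ number and the watchdog
 threshold are illustrative, not recommendations):
 
        taskset -pc 0-2 1                     # keep PID 1 and its children off CPU 3
        echo 2 > /proc/irq/44/smp_affinity    # steer a hypothetical IRQ 44 to CPU 1
        echo 0 > /sys/devices/system/cpu/cpu3/online   # offline, then online, to
        echo 1 > /sys/devices/system/cpu/cpu3/online   # migrate recurring timers away
        echo 60 > /proc/sys/kernel/watchdog_thresh     # relax the lockup watchdog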
index 97d45f276fe6e235801b8e1584c9609574d9cc4e..eaf32a1fd0b1095f6c42b707476faf5c1636068c 100644 (file)
@@ -80,8 +80,6 @@ Valid names are:
   /dev/sdd: -> 0x0830 (fourth SCSI disk)
   /dev/sde: -> 0x0840 (fifth SCSI disk)
   /dev/fd : -> 0x0200 (floppy disk)
-  /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k)
-  /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k)
 
   The name must be followed by a decimal number, that stands for the
 partition number. Internally, the value of the number is just
index 286a5680f49051bd9a4abb3dac827af97a1d8f3e..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 (file)
@@ -1 +0,0 @@
-ifenslave
index 258d9b92c36f78780fdb7f04e767158192361258..32dfbd924121f13747f220d692b0ad61a3a507d2 100644 (file)
@@ -88,8 +88,6 @@ gianfar.txt
        - Gianfar Ethernet Driver.
 ieee802154.txt
        - Linux IEEE 802.15.4 implementation, API and drivers
-ifenslave.c
-       - Configure network interfaces for parallel routing (bonding).
 igb.txt
        - README for the Intel Gigabit Ethernet Driver (igb).
 igbvf.txt
index 24c308dd3fd183f70e8d151fb19a59aa475de02e..0aa1ac98fc2b1b1a9325f066879216d56ae7e1db 100644 (file)
@@ -1,11 +1,6 @@
 # kbuild trick to avoid linker error. Can be omitted if a module is built.
 obj- := dummy.o
 
-# List of programs to build
-hostprogs-y := ifenslave
-
-HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
-
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
index 10a015c384b844b946ae8d7b71b81abc7fd33c79..87bbcfee2e067348c58aead44d54110112dd4a40 100644 (file)
@@ -104,8 +104,7 @@ Table of Contents
 ==============================
 
        Most popular distro kernels ship with the bonding driver
-already available as a module and the ifenslave user level control
-program installed and ready for use. If your distro does not, or you
+already available as a module. If your distro does not, or you
 have need to compile bonding from source (e.g., configuring and
 installing a mainline kernel from kernel.org), you'll need to perform
 the following steps:
@@ -124,46 +123,13 @@ device support" section.  It is recommended that you configure the
 driver as module since it is currently the only way to pass parameters
 to the driver or configure more than one bonding device.
 
-       Build and install the new kernel and modules, then continue
-below to install ifenslave.
+       Build and install the new kernel and modules.
 
-1.2 Install ifenslave Control Utility
+1.2 Bonding Control Utility
 -------------------------------------
 
-       The ifenslave user level control program is included in the
-kernel source tree, in the file Documentation/networking/ifenslave.c.
-It is generally recommended that you use the ifenslave that
-corresponds to the kernel that you are using (either from the same
-source tree or supplied with the distro), however, ifenslave
-executables from older kernels should function (but features newer
-than the ifenslave release are not supported).  Running an ifenslave
-that is newer than the kernel is not supported, and may or may not
-work.
-
-       To install ifenslave, do the following:
-
-# gcc -Wall -O -I/usr/src/linux/include ifenslave.c -o ifenslave
-# cp ifenslave /sbin/ifenslave
-
-       If your kernel source is not in "/usr/src/linux," then replace
-"/usr/src/linux/include" in the above with the location of your kernel
-source include directory.
-
-       You may wish to back up any existing /sbin/ifenslave, or, for
-testing or informal use, tag the ifenslave to the kernel version
-(e.g., name the ifenslave executable /sbin/ifenslave-2.6.10).
-
-IMPORTANT NOTE:
-
-       If you omit the "-I" or specify an incorrect directory, you
-may end up with an ifenslave that is incompatible with the kernel
-you're trying to build it for.  Some distros (e.g., Red Hat from 7.1
-onwards) do not have /usr/include/linux symbolically linked to the
-default kernel source include directory.
-
-SECOND IMPORTANT NOTE:
-       If you plan to configure bonding using sysfs or using the
-/etc/network/interfaces file, you do not need to use ifenslave.
+        It is recommended to configure bonding via iproute2 (netlink)
+or sysfs; the old ifenslave control utility is obsolete.
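 
 For illustration, a minimal iproute2/sysfs sketch (interface names, mode and
 addresses are examples; distro-specific persistence is omitted):
 
 modprobe bonding mode=active-backup miimon=100   # creates bond0
 ip addr add 192.168.1.1/24 dev bond0
 ip link set bond0 up
 ip link set eth0 master bond0
 ip link set eth1 master bond0
 # alternatively, enslave via sysfs:
 echo +eth0 > /sys/class/net/bond0/bonding/slaves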
 
 2. Bonding Driver Options
 =========================
@@ -337,6 +303,12 @@ arp_validate
        such a situation, validation of backup slaves must be
        disabled.
 
+       The validation of ARP requests on backup slaves mainly helps
+       bonding to decide which slaves are more likely to work in case
+       the active slave fails; it doesn't really guarantee that the
+       backup slave will work if it's selected as the next active
+       slave.
+
        This option is useful in network configurations in which
        multiple bonding hosts are concurrently issuing ARPs to one or
        more targets beyond a common switch.  Should the link between
@@ -349,6 +321,25 @@ arp_validate
 
        This option was added in bonding version 3.1.0.
 
+arp_all_targets
+
+       Specifies the number of arp_ip_targets that must be reachable
+       in order for the ARP monitor to consider a slave as being up.
+       This option affects only active-backup mode, for slaves with
+       arp_validate enabled.
+
+       Possible values are:
+
+       any or 0
+
+               consider the slave up only when any of the arp_ip_targets
+               is reachable
+
+       all or 1
+
+               consider the slave up only when all of the arp_ip_targets
+               are reachable
+
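+       A hypothetical module load illustrating arp_all_targets together
+       with ARP monitoring (the addresses are placeholders):
+
+       modprobe bonding mode=active-backup arp_interval=1000 \
+               arp_ip_target=192.168.1.1,192.168.1.2 \
+               arp_validate=active arp_all_targets=all
+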
 downdelay
 
        Specifies the time, in milliseconds, to wait before disabling
@@ -851,7 +842,7 @@ resend_igmp
 ==============================
 
        You can configure bonding using either your distro's network
-initialization scripts, or manually using either ifenslave or the
+initialization scripts, or manually using either iproute2 or the
 sysfs interface.  Distros generally use one of three packages for the
 network initialization scripts: initscripts, sysconfig or interfaces.
 Recent versions of these packages have support for bonding, while older
@@ -1160,7 +1151,7 @@ not support this method for specifying multiple bonding interfaces; for
 those instances, see the "Configuring Multiple Bonds Manually" section,
 below.
 
-3.3 Configuring Bonding Manually with Ifenslave
+3.3 Configuring Bonding Manually with iproute2
 -----------------------------------------------
 
        This section applies to distros whose network initialization
@@ -1171,7 +1162,7 @@ version 8.
        The general method for these systems is to place the bonding
 module parameters into a config file in /etc/modprobe.d/ (as
 appropriate for the installed distro), then add modprobe and/or
-ifenslave commands to the system's global init script.  The name of
+`ip link` commands to the system's global init script.  The name of
 the global init script differs; for sysconfig, it is
 /etc/init.d/boot.local and for initscripts it is /etc/rc.d/rc.local.
 
@@ -1183,8 +1174,8 @@ reboots, edit the appropriate file (/etc/init.d/boot.local or
 modprobe bonding mode=balance-alb miimon=100
 modprobe e100
 ifconfig bond0 192.168.1.1 netmask 255.255.255.0 up
-ifenslave bond0 eth0
-ifenslave bond0 eth1
+ip link set eth0 master bond0
+ip link set eth1 master bond0
 
        Replace the example bonding module parameters and bond0
 network configuration (IP address, netmask, etc) with the appropriate
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
deleted file mode 100644 (file)
index ac5debb..0000000
+++ /dev/null
@@ -1,1105 +0,0 @@
-/* Mode: C;
- * ifenslave.c: Configure network interfaces for parallel routing.
- *
- *     This program controls the Linux implementation of running multiple
- *     network interfaces in parallel.
- *
- * Author:     Donald Becker <becker@cesdis.gsfc.nasa.gov>
- *             Copyright 1994-1996 Donald Becker
- *
- *             This program is free software; you can redistribute it
- *             and/or modify it under the terms of the GNU General Public
- *             License as published by the Free Software Foundation.
- *
- *     The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
- *     Center of Excellence in Space Data and Information Sciences
- *        Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
- *
- *  Changes :
- *    - 2000/10/02 Willy Tarreau <willy at meta-x.org> :
- *       - few fixes. Master's MAC address is now correctly taken from
- *         the first device when not previously set ;
- *       - detach support : call BOND_RELEASE to detach an enslaved interface.
- *       - give a mini-howto from command-line help : # ifenslave -h
- *
- *    - 2001/02/16 Chad N. Tindel <ctindel at ieee dot org> :
- *       - Master is now brought down before setting the MAC address.  In
- *         the 2.4 kernel you can't change the MAC address while the device is
- *         up because you get EBUSY.
- *
- *    - 2001/09/13 Takao Indoh <indou dot takao at jp dot fujitsu dot com>
- *       - Added the ability to change the active interface on a mode 1 bond
- *         at runtime.
- *
- *    - 2001/10/23 Chad N. Tindel <ctindel at ieee dot org> :
- *       - No longer set the MAC address of the master.  The bond device will
- *         take care of this itself
- *       - Try the SIOC*** versions of the bonding ioctls before using the
- *         old versions
- *    - 2002/02/18 Erik Habbinga <erik_habbinga @ hp dot com> :
- *       - ifr2.ifr_flags was not initialized in the hwaddr_notset case,
- *         SIOCGIFFLAGS now called before hwaddr_notset test
- *
- *    - 2002/10/31 Tony Cureington <tony.cureington * hp_com> :
- *       - If the master does not have a hardware address when the first slave
- *         is enslaved, the master is assigned the hardware address of that
- *         slave - there is a comment in bonding.c stating "ifenslave takes
- *         care of this now." This corrects the problem of slaves having
- *         different hardware addresses in active-backup mode when
- *         multiple interfaces are specified on a single ifenslave command
- *         (ifenslave bond0 eth0 eth1).
- *
- *    - 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and
- *                   Shmulik Hen <shmulik.hen at intel dot com>
- *       - Moved setting the slave's mac address and openning it, from
- *         the application to the driver. This enables support of modes
- *         that need to use the unique mac address of each slave.
- *         The driver also takes care of closing the slave and restoring its
- *         original mac address upon release.
- *         In addition, block possibility of enslaving before the master is up.
- *         This prevents putting the system in an undefined state.
- *
- *    - 2003/05/01 - Amir Noam <amir.noam at intel dot com>
- *       - Added ABI version control to restore compatibility between
- *         new/old ifenslave and new/old bonding.
- *       - Prevent adding an adapter that is already a slave.
- *         Fixes the problem of stalling the transmission and leaving
- *         the slave in a down state.
- *
- *    - 2003/05/01 - Shmulik Hen <shmulik.hen at intel dot com>
- *       - Prevent enslaving if the bond device is down.
- *         Fixes the problem of leaving the system in unstable state and
- *         halting when trying to remove the module.
- *       - Close socket on all abnormal exists.
- *       - Add versioning scheme that follows that of the bonding driver.
- *         current version is 1.0.0 as a base line.
- *
- *    - 2003/05/22 - Jay Vosburgh <fubar at us dot ibm dot com>
- *      - ifenslave -c was broken; it's now fixed
- *      - Fixed problem with routes vanishing from master during enslave
- *        processing.
- *
- *    - 2003/05/27 - Amir Noam <amir.noam at intel dot com>
- *      - Fix backward compatibility issues:
- *        For drivers not using ABI versions, slave was set down while
- *        it should be left up before enslaving.
- *        Also, master was not set down and the default set_mac_address()
- *        would fail and generate an error message in the system log.
- *      - For opt_c: slave should not be set to the master's setting
- *        while it is running. It was already set during enslave. To
- *        simplify things, it is now handled separately.
- *
- *    - 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
- *      - Code cleanup and style changes
- *        set version to 1.1.0
- */
-
-#define APP_VERSION    "1.1.0"
-#define APP_RELDATE    "December 1, 2003"
-#define APP_NAME       "ifenslave"
-
-static char *version =
-APP_NAME ".c:v" APP_VERSION " (" APP_RELDATE ")\n"
-"o Donald Becker (becker@cesdis.gsfc.nasa.gov).\n"
-"o Detach support added on 2000/10/02 by Willy Tarreau (willy at meta-x.org).\n"
-"o 2.4 kernel support added on 2001/02/16 by Chad N. Tindel\n"
-"  (ctindel at ieee dot org).\n";
-
-static const char *usage_msg =
-"Usage: ifenslave [-f] <master-if> <slave-if> [<slave-if>...]\n"
-"       ifenslave -d   <master-if> <slave-if> [<slave-if>...]\n"
-"       ifenslave -c   <master-if> <slave-if>\n"
-"       ifenslave --help\n";
-
-static const char *help_msg =
-"\n"
-"       To create a bond device, simply follow these three steps :\n"
-"       - ensure that the required drivers are properly loaded :\n"
-"         # modprobe bonding ; modprobe <3c59x|eepro100|pcnet32|tulip|...>\n"
-"       - assign an IP address to the bond device :\n"
-"         # ifconfig bond0 <addr> netmask <mask> broadcast <bcast>\n"
-"       - attach all the interfaces you need to the bond device :\n"
-"         # ifenslave [{-f|--force}] bond0 eth0 [eth1 [eth2]...]\n"
-"         If bond0 didn't have a MAC address, it will take eth0's. Then, all\n"
-"         interfaces attached AFTER this assignment will get the same MAC addr.\n"
-"         (except for ALB/TLB modes)\n"
-"\n"
-"       To set the bond device down and automatically release all the slaves :\n"
-"         # ifconfig bond0 down\n"
-"\n"
-"       To detach a dead interface without setting the bond device down :\n"
-"         # ifenslave {-d|--detach} bond0 eth0 [eth1 [eth2]...]\n"
-"\n"
-"       To change active slave :\n"
-"         # ifenslave {-c|--change-active} bond0 eth0\n"
-"\n"
-"       To show master interface info\n"
-"         # ifenslave bond0\n"
-"\n"
-"       To show all interfaces info\n"
-"       # ifenslave {-a|--all-interfaces}\n"
-"\n"
-"       To be more verbose\n"
-"       # ifenslave {-v|--verbose} ...\n"
-"\n"
-"       # ifenslave {-u|--usage}   Show usage\n"
-"       # ifenslave {-V|--version} Show version\n"
-"       # ifenslave {-h|--help}    This message\n"
-"\n";
-
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <ctype.h>
-#include <string.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/ioctl.h>
-#include <linux/if.h>
-#include <net/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/if_bonding.h>
-#include <linux/sockios.h>
-
-typedef unsigned long long u64;        /* hack, so we may include kernel's ethtool.h */
-typedef __uint32_t u32;                /* ditto */
-typedef __uint16_t u16;                /* ditto */
-typedef __uint8_t u8;          /* ditto */
-#include <linux/ethtool.h>
-
-struct option longopts[] = {
-       /* { name  has_arg  *flag  val } */
-       {"all-interfaces",      0, 0, 'a'},     /* Show all interfaces. */
-       {"change-active",       0, 0, 'c'},     /* Change the active slave.  */
-       {"detach",              0, 0, 'd'},     /* Detach a slave interface. */
-       {"force",               0, 0, 'f'},     /* Force the operation. */
-       {"help",                0, 0, 'h'},     /* Give help */
-       {"usage",               0, 0, 'u'},     /* Give usage */
-       {"verbose",             0, 0, 'v'},     /* Report each action taken. */
-       {"version",             0, 0, 'V'},     /* Emit version information. */
-       { 0, 0, 0, 0}
-};
-
-/* Command-line flags. */
-unsigned int
-opt_a = 0,     /* Show-all-interfaces flag. */
-opt_c = 0,     /* Change-active-slave flag. */
-opt_d = 0,     /* Detach a slave interface. */
-opt_f = 0,     /* Force the operation. */
-opt_h = 0,     /* Help */
-opt_u = 0,     /* Usage */
-opt_v = 0,     /* Verbose flag. */
-opt_V = 0;     /* Version */
-
-int skfd = -1;         /* AF_INET socket for ioctl() calls.*/
-int abi_ver = 0;       /* userland - kernel ABI version */
-int hwaddr_set = 0;    /* Master's hwaddr is set */
-int saved_errno;
-
-struct ifreq master_mtu, master_flags, master_hwaddr;
-struct ifreq slave_mtu, slave_flags, slave_hwaddr;
-
-struct dev_ifr {
-       struct ifreq *req_ifr;
-       char *req_name;
-       int req_type;
-};
-
-struct dev_ifr master_ifra[] = {
-       {&master_mtu,     "SIOCGIFMTU",     SIOCGIFMTU},
-       {&master_flags,   "SIOCGIFFLAGS",   SIOCGIFFLAGS},
-       {&master_hwaddr,  "SIOCGIFHWADDR",  SIOCGIFHWADDR},
-       {NULL, "", 0}
-};
-
-struct dev_ifr slave_ifra[] = {
-       {&slave_mtu,     "SIOCGIFMTU",     SIOCGIFMTU},
-       {&slave_flags,   "SIOCGIFFLAGS",   SIOCGIFFLAGS},
-       {&slave_hwaddr,  "SIOCGIFHWADDR",  SIOCGIFHWADDR},
-       {NULL, "", 0}
-};
-
-static void if_print(char *ifname);
-static int get_drv_info(char *master_ifname);
-static int get_if_settings(char *ifname, struct dev_ifr ifra[]);
-static int get_slave_flags(char *slave_ifname);
-static int set_master_hwaddr(char *master_ifname, struct sockaddr *hwaddr);
-static int set_slave_hwaddr(char *slave_ifname, struct sockaddr *hwaddr);
-static int set_slave_mtu(char *slave_ifname, int mtu);
-static int set_if_flags(char *ifname, short flags);
-static int set_if_up(char *ifname, short flags);
-static int set_if_down(char *ifname, short flags);
-static int clear_if_addr(char *ifname);
-static int set_if_addr(char *master_ifname, char *slave_ifname);
-static int change_active(char *master_ifname, char *slave_ifname);
-static int enslave(char *master_ifname, char *slave_ifname);
-static int release(char *master_ifname, char *slave_ifname);
-#define v_print(fmt, args...)  \
-       if (opt_v)              \
-               fprintf(stderr, fmt, ## args )
-
-int main(int argc, char *argv[])
-{
-       char **spp, *master_ifname, *slave_ifname;
-       int c, i, rv;
-       int res = 0;
-       int exclusive = 0;
-
-       while ((c = getopt_long(argc, argv, "acdfhuvV", longopts, 0)) != EOF) {
-               switch (c) {
-               case 'a': opt_a++; exclusive++; break;
-               case 'c': opt_c++; exclusive++; break;
-               case 'd': opt_d++; exclusive++; break;
-               case 'f': opt_f++; exclusive++; break;
-               case 'h': opt_h++; exclusive++; break;
-               case 'u': opt_u++; exclusive++; break;
-               case 'v': opt_v++; break;
-               case 'V': opt_V++; exclusive++; break;
-
-               case '?':
-                       fprintf(stderr, "%s", usage_msg);
-                       res = 2;
-                       goto out;
-               }
-       }
-
-       /* options check */
-       if (exclusive > 1) {
-               fprintf(stderr, "%s", usage_msg);
-               res = 2;
-               goto out;
-       }
-
-       if (opt_v || opt_V) {
-               printf("%s", version);
-               if (opt_V) {
-                       res = 0;
-                       goto out;
-               }
-       }
-
-       if (opt_u) {
-               printf("%s", usage_msg);
-               res = 0;
-               goto out;
-       }
-
-       if (opt_h) {
-               printf("%s", usage_msg);
-               printf("%s", help_msg);
-               res = 0;
-               goto out;
-       }
-
-       /* Open a basic socket */
-       if ((skfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
-               perror("socket");
-               res = 1;
-               goto out;
-       }
-
-       if (opt_a) {
-               if (optind == argc) {
-                       /* No remaining args */
-                       /* show all interfaces */
-                       if_print((char *)NULL);
-                       goto out;
-               } else {
-                       /* Just show usage */
-                       fprintf(stderr, "%s", usage_msg);
-                       res = 2;
-                       goto out;
-               }
-       }
-
-       /* Copy the interface name */
-       spp = argv + optind;
-       master_ifname = *spp++;
-
-       if (master_ifname == NULL) {
-               fprintf(stderr, "%s", usage_msg);
-               res = 2;
-               goto out;
-       }
-
-       /* exchange abi version with bonding module */
-       res = get_drv_info(master_ifname);
-       if (res) {
-               fprintf(stderr,
-                       "Master '%s': Error: handshake with driver failed. "
-                       "Aborting\n",
-                       master_ifname);
-               goto out;
-       }
-
-       slave_ifname = *spp++;
-
-       if (slave_ifname == NULL) {
-               if (opt_d || opt_c) {
-                       fprintf(stderr, "%s", usage_msg);
-                       res = 2;
-                       goto out;
-               }
-
-               /* A single arg means show the
-                * configuration for this interface
-                */
-               if_print(master_ifname);
-               goto out;
-       }
-
-       res = get_if_settings(master_ifname, master_ifra);
-       if (res) {
-               /* Probably a good reason not to go on */
-               fprintf(stderr,
-                       "Master '%s': Error: get settings failed: %s. "
-                       "Aborting\n",
-                       master_ifname, strerror(res));
-               goto out;
-       }
-
-       /* check if master is indeed a master;
-        * if not then fail any operation
-        */
-       if (!(master_flags.ifr_flags & IFF_MASTER)) {
-               fprintf(stderr,
-                       "Illegal operation; the specified interface '%s' "
-                       "is not a master. Aborting\n",
-                       master_ifname);
-               res = 1;
-               goto out;
-       }
-
-       /* check if master is up; if not then fail any operation */
-       if (!(master_flags.ifr_flags & IFF_UP)) {
-               fprintf(stderr,
-                       "Illegal operation; the specified master interface "
-                       "'%s' is not up.\n",
-                       master_ifname);
-               res = 1;
-               goto out;
-       }
-
-       /* Only for enslaving */
-       if (!opt_c && !opt_d) {
-               sa_family_t master_family = master_hwaddr.ifr_hwaddr.sa_family;
-               unsigned char *hwaddr =
-                       (unsigned char *)master_hwaddr.ifr_hwaddr.sa_data;
-
-               /* The family '1' is ARPHRD_ETHER for ethernet. */
-               if (master_family != 1 && !opt_f) {
-                       fprintf(stderr,
-                               "Illegal operation: The specified master "
-                               "interface '%s' is not ethernet-like.\n "
-                               "This program is designed to work with "
-                               "ethernet-like network interfaces.\n "
-                               "Use the '-f' option to force the "
-                               "operation.\n",
-                               master_ifname);
-                       res = 1;
-                       goto out;
-               }
-
-               /* Check master's hw addr */
-               for (i = 0; i < 6; i++) {
-                       if (hwaddr[i] != 0) {
-                               hwaddr_set = 1;
-                               break;
-                       }
-               }
-
-               if (hwaddr_set) {
-                       v_print("current hardware address of master '%s' "
-                               "is %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
-                               "type %d\n",
-                               master_ifname,
-                               hwaddr[0], hwaddr[1],
-                               hwaddr[2], hwaddr[3],
-                               hwaddr[4], hwaddr[5],
-                               master_family);
-               }
-       }
-
-       /* Accepts only one slave */
-       if (opt_c) {
-               /* change active slave */
-               res = get_slave_flags(slave_ifname);
-               if (res) {
-                       fprintf(stderr,
-                               "Slave '%s': Error: get flags failed. "
-                               "Aborting\n",
-                               slave_ifname);
-                       goto out;
-               }
-               res = change_active(master_ifname, slave_ifname);
-               if (res) {
-                       fprintf(stderr,
-                               "Master '%s', Slave '%s': Error: "
-                               "Change active failed\n",
-                               master_ifname, slave_ifname);
-               }
-       } else {
-               /* Accept multiple slaves */
-               do {
-                       if (opt_d) {
-                               /* detach a slave interface from the master */
-                               rv = get_slave_flags(slave_ifname);
-                               if (rv) {
-                                       /* Can't work with this slave. */
-                                       /* remember the error and skip it*/
-                                       fprintf(stderr,
-                                               "Slave '%s': Error: get flags "
-                                               "failed. Skipping\n",
-                                               slave_ifname);
-                                       res = rv;
-                                       continue;
-                               }
-                               rv = release(master_ifname, slave_ifname);
-                               if (rv) {
-                                       fprintf(stderr,
-                                               "Master '%s', Slave '%s': Error: "
-                                               "Release failed\n",
-                                               master_ifname, slave_ifname);
-                                       res = rv;
-                               }
-                       } else {
-                               /* attach a slave interface to the master */
-                               rv = get_if_settings(slave_ifname, slave_ifra);
-                               if (rv) {
-                                       /* Can't work with this slave. */
-                                       /* remember the error and skip it*/
-                                       fprintf(stderr,
-                                               "Slave '%s': Error: get "
-                                               "settings failed: %s. "
-                                               "Skipping\n",
-                                               slave_ifname, strerror(rv));
-                                       res = rv;
-                                       continue;
-                               }
-                               rv = enslave(master_ifname, slave_ifname);
-                               if (rv) {
-                                       fprintf(stderr,
-                                               "Master '%s', Slave '%s': Error: "
-                                               "Enslave failed\n",
-                                               master_ifname, slave_ifname);
-                                       res = rv;
-                               }
-                       }
-               } while ((slave_ifname = *spp++) != NULL);
-       }
-
-out:
-       if (skfd >= 0) {
-               close(skfd);
-       }
-
-       return res;
-}
-
-static short mif_flags;
-
-/* Get the interface configuration from the kernel. */
-static int if_getconfig(char *ifname)
-{
-       struct ifreq ifr;
-       int metric, mtu;        /* Parameters of the master interface. */
-       struct sockaddr dstaddr, broadaddr, netmask;
-       unsigned char *hwaddr;
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFFLAGS, &ifr) < 0)
-               return -1;
-       mif_flags = ifr.ifr_flags;
-       printf("The result of SIOCGIFFLAGS on %s is %x.\n",
-              ifname, ifr.ifr_flags);
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFADDR, &ifr) < 0)
-               return -1;
-       printf("The result of SIOCGIFADDR is %2.2x.%2.2x.%2.2x.%2.2x.\n",
-              ifr.ifr_addr.sa_data[0], ifr.ifr_addr.sa_data[1],
-              ifr.ifr_addr.sa_data[2], ifr.ifr_addr.sa_data[3]);
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFHWADDR, &ifr) < 0)
-               return -1;
-
-       /* Gotta convert from 'char' to unsigned for printf(). */
-       hwaddr = (unsigned char *)ifr.ifr_hwaddr.sa_data;
-       printf("The result of SIOCGIFHWADDR is type %d  "
-              "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
-              ifr.ifr_hwaddr.sa_family, hwaddr[0], hwaddr[1],
-              hwaddr[2], hwaddr[3], hwaddr[4], hwaddr[5]);
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFMETRIC, &ifr) < 0) {
-               metric = 0;
-       } else
-               metric = ifr.ifr_metric;
-       printf("The result of SIOCGIFMETRIC is %d\n", metric);
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0)
-               mtu = 0;
-       else
-               mtu = ifr.ifr_mtu;
-       printf("The result of SIOCGIFMTU is %d\n", mtu);
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) {
-               memset(&dstaddr, 0, sizeof(struct sockaddr));
-       } else
-               dstaddr = ifr.ifr_dstaddr;
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFBRDADDR, &ifr) < 0) {
-               memset(&broadaddr, 0, sizeof(struct sockaddr));
-       } else
-               broadaddr = ifr.ifr_broadaddr;
-
-       strcpy(ifr.ifr_name, ifname);
-       if (ioctl(skfd, SIOCGIFNETMASK, &ifr) < 0) {
-               memset(&netmask, 0, sizeof(struct sockaddr));
-       } else
-               netmask = ifr.ifr_netmask;
-
-       return 0;
-}
-
-static void if_print(char *ifname)
-{
-       char buff[1024];
-       struct ifconf ifc;
-       struct ifreq *ifr;
-       int i;
-
-       if (ifname == (char *)NULL) {
-               ifc.ifc_len = sizeof(buff);
-               ifc.ifc_buf = buff;
-               if (ioctl(skfd, SIOCGIFCONF, &ifc) < 0) {
-                       perror("SIOCGIFCONF failed");
-                       return;
-               }
-
-               ifr = ifc.ifc_req;
-               for (i = ifc.ifc_len / sizeof(struct ifreq); --i >= 0; ifr++) {
-                       if (if_getconfig(ifr->ifr_name) < 0) {
-                               fprintf(stderr,
-                                       "%s: unknown interface.\n",
-                                       ifr->ifr_name);
-                               continue;
-                       }
-
-                       if (((mif_flags & IFF_UP) == 0) && !opt_a) continue;
-                       /*ife_print(&ife);*/
-               }
-       } else {
-               if (if_getconfig(ifname) < 0) {
-                       fprintf(stderr,
-                               "%s: unknown interface.\n", ifname);
-               }
-       }
-}
-
-static int get_drv_info(char *master_ifname)
-{
-       struct ifreq ifr;
-       struct ethtool_drvinfo info;
-       char *endptr;
-
-       memset(&ifr, 0, sizeof(ifr));
-       strncpy(ifr.ifr_name, master_ifname, IFNAMSIZ);
-       ifr.ifr_data = (caddr_t)&info;
-
-       info.cmd = ETHTOOL_GDRVINFO;
-       strncpy(info.driver, "ifenslave", 32);
-       snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
-
-       if (ioctl(skfd, SIOCETHTOOL, &ifr) < 0) {
-               if (errno == EOPNOTSUPP) {
-                       goto out;
-               }
-
-               saved_errno = errno;
-               v_print("Master '%s': Error: get bonding info failed %s\n",
-                       master_ifname, strerror(saved_errno));
-               return 1;
-       }
-
-       abi_ver = strtoul(info.fw_version, &endptr, 0);
-       if (*endptr) {
-                v_print("Master '%s': Error: got invalid string as an ABI "
-                       "version from the bonding module\n",
-                       master_ifname);
-               return 1;
-       }
-
-out:
-       v_print("ABI ver is %d\n", abi_ver);
-
-       return 0;
-}
-
-static int change_active(char *master_ifname, char *slave_ifname)
-{
-       struct ifreq ifr;
-       int res = 0;
-
-       if (!(slave_flags.ifr_flags & IFF_SLAVE)) {
-               fprintf(stderr,
-                       "Illegal operation: The specified slave interface "
-                       "'%s' is not a slave\n",
-                       slave_ifname);
-               return 1;
-       }
-
-       strncpy(ifr.ifr_name, master_ifname, IFNAMSIZ);
-       strncpy(ifr.ifr_slave, slave_ifname, IFNAMSIZ);
-       if ((ioctl(skfd, SIOCBONDCHANGEACTIVE, &ifr) < 0) &&
-           (ioctl(skfd, BOND_CHANGE_ACTIVE_OLD, &ifr) < 0)) {
-               saved_errno = errno;
-               v_print("Master '%s': Error: SIOCBONDCHANGEACTIVE failed: "
-                       "%s\n",
-                       master_ifname, strerror(saved_errno));
-               res = 1;
-       }
-
-       return res;
-}
-
-static int enslave(char *master_ifname, char *slave_ifname)
-{
-       struct ifreq ifr;
-       int res = 0;
-
-       if (slave_flags.ifr_flags & IFF_SLAVE) {
-               fprintf(stderr,
-                       "Illegal operation: The specified slave interface "
-                       "'%s' is already a slave\n",
-                       slave_ifname);
-               return 1;
-       }
-
-       res = set_if_down(slave_ifname, slave_flags.ifr_flags);
-       if (res) {
-               fprintf(stderr,
-                       "Slave '%s': Error: bring interface down failed\n",
-                       slave_ifname);
-               return res;
-       }
-
-       if (abi_ver < 2) {
-               /* Older bonding versions would panic if the slave has no IP
-                * address, so get the IP setting from the master.
-                */
-               set_if_addr(master_ifname, slave_ifname);
-       } else {
-               res = clear_if_addr(slave_ifname);
-               if (res) {
-                       fprintf(stderr,
-                               "Slave '%s': Error: clear address failed\n",
-                               slave_ifname);
-                       return res;
-               }
-       }
-
-       if (master_mtu.ifr_mtu != slave_mtu.ifr_mtu) {
-               res = set_slave_mtu(slave_ifname, master_mtu.ifr_mtu);
-               if (res) {
-                       fprintf(stderr,
-                               "Slave '%s': Error: set MTU failed\n",
-                               slave_ifname);
-                       return res;
-               }
-       }
-
-       if (hwaddr_set) {
-               /* Master already has an hwaddr
-                * so set it's hwaddr to the slave
-                */
-               if (abi_ver < 1) {
-                       /* The driver is using an old ABI, so
-                        * the application sets the slave's
-                        * hwaddr
-                        */
-                       res = set_slave_hwaddr(slave_ifname,
-                                              &(master_hwaddr.ifr_hwaddr));
-                       if (res) {
-                               fprintf(stderr,
-                                       "Slave '%s': Error: set hw address "
-                                       "failed\n",
-                                       slave_ifname);
-                               goto undo_mtu;
-                       }
-
-                       /* For old ABI the application needs to bring the
-                        * slave back up
-                        */
-                       res = set_if_up(slave_ifname, slave_flags.ifr_flags);
-                       if (res) {
-                               fprintf(stderr,
-                                       "Slave '%s': Error: bring interface "
-					"up failed\n",
-                                       slave_ifname);
-                               goto undo_slave_mac;
-                       }
-               }
-               /* The driver is using a new ABI,
-                * so the driver takes care of setting
-                * the slave's hwaddr and bringing
-                * it up again
-                */
-       } else {
-               /* No hwaddr for master yet, so
-                * set the slave's hwaddr to it
-                */
-               if (abi_ver < 1) {
-                       /* For old ABI, the master needs to be
-                        * down before setting its hwaddr
-                        */
-                       res = set_if_down(master_ifname, master_flags.ifr_flags);
-                       if (res) {
-                               fprintf(stderr,
-                                       "Master '%s': Error: bring interface "
-                                       "down failed\n",
-                                       master_ifname);
-                               goto undo_mtu;
-                       }
-               }
-
-               res = set_master_hwaddr(master_ifname,
-                                       &(slave_hwaddr.ifr_hwaddr));
-               if (res) {
-                       fprintf(stderr,
-                               "Master '%s': Error: set hw address "
-                               "failed\n",
-                               master_ifname);
-                       goto undo_mtu;
-               }
-
-               if (abi_ver < 1) {
-                       /* For old ABI, bring the master
-                        * back up
-                        */
-                       res = set_if_up(master_ifname, master_flags.ifr_flags);
-                       if (res) {
-                               fprintf(stderr,
-                                       "Master '%s': Error: bring interface "
-                                       "up failed\n",
-                                       master_ifname);
-                               goto undo_master_mac;
-                       }
-               }
-
-               hwaddr_set = 1;
-       }
-
-       /* Do the real thing */
-       strncpy(ifr.ifr_name, master_ifname, IFNAMSIZ);
-       strncpy(ifr.ifr_slave, slave_ifname, IFNAMSIZ);
-       if ((ioctl(skfd, SIOCBONDENSLAVE, &ifr) < 0) &&
-           (ioctl(skfd, BOND_ENSLAVE_OLD, &ifr) < 0)) {
-               saved_errno = errno;
-               v_print("Master '%s': Error: SIOCBONDENSLAVE failed: %s\n",
-                       master_ifname, strerror(saved_errno));
-               res = 1;
-       }
-
-       if (res) {
-               goto undo_master_mac;
-       }
-
-       return 0;
-
-/* rollback (best effort) */
-undo_master_mac:
-       set_master_hwaddr(master_ifname, &(master_hwaddr.ifr_hwaddr));
-       hwaddr_set = 0;
-       goto undo_mtu;
-undo_slave_mac:
-       set_slave_hwaddr(slave_ifname, &(slave_hwaddr.ifr_hwaddr));
-undo_mtu:
-       set_slave_mtu(slave_ifname, slave_mtu.ifr_mtu);
-       return res;
-}
-
-static int release(char *master_ifname, char *slave_ifname)
-{
-       struct ifreq ifr;
-       int res = 0;
-
-       if (!(slave_flags.ifr_flags & IFF_SLAVE)) {
-               fprintf(stderr,
-                       "Illegal operation: The specified slave interface "
-                       "'%s' is not a slave\n",
-                       slave_ifname);
-               return 1;
-       }
-
-       strncpy(ifr.ifr_name, master_ifname, IFNAMSIZ);
-       strncpy(ifr.ifr_slave, slave_ifname, IFNAMSIZ);
-       if ((ioctl(skfd, SIOCBONDRELEASE, &ifr) < 0) &&
-           (ioctl(skfd, BOND_RELEASE_OLD, &ifr) < 0)) {
-               saved_errno = errno;
-               v_print("Master '%s': Error: SIOCBONDRELEASE failed: %s\n",
-                       master_ifname, strerror(saved_errno));
-               return 1;
-       } else if (abi_ver < 1) {
-               /* The driver is using an old ABI, so we'll set the interface
-                * down to avoid any conflicts due to same MAC/IP
-                */
-               res = set_if_down(slave_ifname, slave_flags.ifr_flags);
-               if (res) {
-                       fprintf(stderr,
-                               "Slave '%s': Error: bring interface "
-                               "down failed\n",
-                               slave_ifname);
-               }
-       }
-
-       /* set to default mtu */
-       set_slave_mtu(slave_ifname, 1500);
-
-       return res;
-}
-
-static int get_if_settings(char *ifname, struct dev_ifr ifra[])
-{
-       int i;
-       int res = 0;
-
-       for (i = 0; ifra[i].req_ifr; i++) {
-               strncpy(ifra[i].req_ifr->ifr_name, ifname, IFNAMSIZ);
-               res = ioctl(skfd, ifra[i].req_type, ifra[i].req_ifr);
-               if (res < 0) {
-                       saved_errno = errno;
-                       v_print("Interface '%s': Error: %s failed: %s\n",
-                               ifname, ifra[i].req_name,
-                               strerror(saved_errno));
-
-                       return saved_errno;
-               }
-       }
-
-       return 0;
-}
-
-static int get_slave_flags(char *slave_ifname)
-{
-       int res = 0;
-
-       strncpy(slave_flags.ifr_name, slave_ifname, IFNAMSIZ);
-       res = ioctl(skfd, SIOCGIFFLAGS, &slave_flags);
-       if (res < 0) {
-               saved_errno = errno;
-               v_print("Slave '%s': Error: SIOCGIFFLAGS failed: %s\n",
-                       slave_ifname, strerror(saved_errno));
-       } else {
-               v_print("Slave %s: flags %04X.\n",
-                       slave_ifname, slave_flags.ifr_flags);
-       }
-
-       return res;
-}
-
-static int set_master_hwaddr(char *master_ifname, struct sockaddr *hwaddr)
-{
-       unsigned char *addr = (unsigned char *)hwaddr->sa_data;
-       struct ifreq ifr;
-       int res = 0;
-
-       strncpy(ifr.ifr_name, master_ifname, IFNAMSIZ);
-       memcpy(&(ifr.ifr_hwaddr), hwaddr, sizeof(struct sockaddr));
-       res = ioctl(skfd, SIOCSIFHWADDR, &ifr);
-       if (res < 0) {
-               saved_errno = errno;
-               v_print("Master '%s': Error: SIOCSIFHWADDR failed: %s\n",
-                       master_ifname, strerror(saved_errno));
-               return res;
-       } else {
-               v_print("Master '%s': hardware address set to "
-                       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
-                       master_ifname, addr[0], addr[1], addr[2],
-                       addr[3], addr[4], addr[5]);
-       }
-
-       return res;
-}
-
-static int set_slave_hwaddr(char *slave_ifname, struct sockaddr *hwaddr)
-{
-       unsigned char *addr = (unsigned char *)hwaddr->sa_data;
-       struct ifreq ifr;
-       int res = 0;
-
-       strncpy(ifr.ifr_name, slave_ifname, IFNAMSIZ);
-       memcpy(&(ifr.ifr_hwaddr), hwaddr, sizeof(struct sockaddr));
-       res = ioctl(skfd, SIOCSIFHWADDR, &ifr);
-       if (res < 0) {
-               saved_errno = errno;
-
-               v_print("Slave '%s': Error: SIOCSIFHWADDR failed: %s\n",
-                       slave_ifname, strerror(saved_errno));
-
-               if (saved_errno == EBUSY) {
-                       v_print("  The device is busy: it must be idle "
-                               "before running this command.\n");
-               } else if (saved_errno == EOPNOTSUPP) {
-                       v_print("  The device does not support setting "
-                               "the MAC address.\n"
-                               "  Your kernel likely does not support slave "
-                               "devices.\n");
-               } else if (saved_errno == EINVAL) {
-                       v_print("  The device's address type does not match "
-                               "the master's address type.\n");
-               }
-               return res;
-       } else {
-               v_print("Slave '%s': hardware address set to "
-                       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
-                       slave_ifname, addr[0], addr[1], addr[2],
-                       addr[3], addr[4], addr[5]);
-       }
-
-       return res;
-}
-
-static int set_slave_mtu(char *slave_ifname, int mtu)
-{
-       struct ifreq ifr;
-       int res = 0;
-
-       ifr.ifr_mtu = mtu;
-       strncpy(ifr.ifr_name, slave_ifname, IFNAMSIZ);
-
-       res = ioctl(skfd, SIOCSIFMTU, &ifr);
-       if (res < 0) {
-               saved_errno = errno;
-               v_print("Slave '%s': Error: SIOCSIFMTU failed: %s\n",
-                       slave_ifname, strerror(saved_errno));
-       } else {
-               v_print("Slave '%s': MTU set to %d.\n", slave_ifname, mtu);
-       }
-
-       return res;
-}
-
-static int set_if_flags(char *ifname, short flags)
-{
-       struct ifreq ifr;
-       int res = 0;
-
-       ifr.ifr_flags = flags;
-       strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
-
-       res = ioctl(skfd, SIOCSIFFLAGS, &ifr);
-       if (res < 0) {
-               saved_errno = errno;
-               v_print("Interface '%s': Error: SIOCSIFFLAGS failed: %s\n",
-                       ifname, strerror(saved_errno));
-       } else {
-               v_print("Interface '%s': flags set to %04X.\n", ifname, flags);
-       }
-
-       return res;
-}
-
-static int set_if_up(char *ifname, short flags)
-{
-       return set_if_flags(ifname, flags | IFF_UP);
-}
-
-static int set_if_down(char *ifname, short flags)
-{
-       return set_if_flags(ifname, flags & ~IFF_UP);
-}
-
-static int clear_if_addr(char *ifname)
-{
-       struct ifreq ifr;
-       int res = 0;
-
-       strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
-       ifr.ifr_addr.sa_family = AF_INET;
-       memset(ifr.ifr_addr.sa_data, 0, sizeof(ifr.ifr_addr.sa_data));
-
-       res = ioctl(skfd, SIOCSIFADDR, &ifr);
-       if (res < 0) {
-               saved_errno = errno;
-               v_print("Interface '%s': Error: SIOCSIFADDR failed: %s\n",
-                       ifname, strerror(saved_errno));
-       } else {
-               v_print("Interface '%s': address cleared\n", ifname);
-       }
-
-       return res;
-}
-
-static int set_if_addr(char *master_ifname, char *slave_ifname)
-{
-       struct ifreq ifr;
-       int res;
-       unsigned char *ipaddr;
-       int i;
-       struct {
-               char *req_name;
-               char *desc;
-               int g_ioctl;
-               int s_ioctl;
-       } ifra[] = {
-               {"IFADDR", "addr", SIOCGIFADDR, SIOCSIFADDR},
-               {"DSTADDR", "destination addr", SIOCGIFDSTADDR, SIOCSIFDSTADDR},
-               {"BRDADDR", "broadcast addr", SIOCGIFBRDADDR, SIOCSIFBRDADDR},
-               {"NETMASK", "netmask", SIOCGIFNETMASK, SIOCSIFNETMASK},
-               {NULL, NULL, 0, 0},
-       };
-
-       for (i = 0; ifra[i].req_name; i++) {
-               strncpy(ifr.ifr_name, master_ifname, IFNAMSIZ);
-               res = ioctl(skfd, ifra[i].g_ioctl, &ifr);
-               if (res < 0) {
-                       int saved_errno = errno;
-
-                       v_print("Interface '%s': Error: SIOCG%s failed: %s\n",
-                               master_ifname, ifra[i].req_name,
-                               strerror(saved_errno));
-
-                       ifr.ifr_addr.sa_family = AF_INET;
-                       memset(ifr.ifr_addr.sa_data, 0,
-                              sizeof(ifr.ifr_addr.sa_data));
-               }
-
-               strncpy(ifr.ifr_name, slave_ifname, IFNAMSIZ);
-               res = ioctl(skfd, ifra[i].s_ioctl, &ifr);
-               if (res < 0) {
-                       int saved_errno = errno;
-
-                       v_print("Interface '%s': Error: SIOCS%s failed: %s\n",
-                               slave_ifname, ifra[i].req_name,
-                               strerror(saved_errno));
-
-               }
-
-               ipaddr = (unsigned char *)ifr.ifr_addr.sa_data;
-               v_print("Interface '%s': set IP %s to %d.%d.%d.%d\n",
-                       slave_ifname, ifra[i].desc,
-                       ipaddr[0], ipaddr[1], ipaddr[2], ipaddr[3]);
-       }
-
-       return 0;
-}
-
-/*
- * Local variables:
- *  version-control: t
- *  kept-new-versions: 5
- *  c-indent-level: 4
- *  c-basic-offset: 4
- *  tab-width: 4
- *  compile-command: "gcc -Wall -Wstrict-prototypes -O -I/usr/src/linux/include ifenslave.c -o ifenslave"
- * End:
- */
-
index f98ca633b5282eb838ae1b04f9497e4a4495f198..36e5a402ed0ee32eabb532d7135db0df39619558 100644 (file)
@@ -183,7 +183,7 @@ tcp_early_retrans - INTEGER
        for triggering fast retransmit when the amount of outstanding data is
        small and when no previously unsent data can be transmitted (such
        that limited transmit could be used). Also controls the use of
-       Tail loss probe (TLP) that converts RTOs occuring due to tail
+       Tail loss probe (TLP) that converts RTOs occurring due to tail
        losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01).
        Possible values:
                0 disables ER
@@ -685,6 +685,15 @@ ip_dynaddr - BOOLEAN
        occurs.
        Default: 0
 
+ip_early_demux - BOOLEAN
+       Optimize input packet processing down to one demux for
+       certain kinds of local sockets.  Currently we only do this
+       for established TCP sockets.
+
+	It may add an additional cost for pure routing workloads that
+	reduces overall throughput; in that case you should disable it.
+       Default: 1
+
 icmp_echo_ignore_all - BOOLEAN
        If set non-zero, then the kernel will ignore all ICMP ECHO
        requests sent to it.
@@ -729,7 +738,7 @@ icmp_ignore_bogus_error_responses - BOOLEAN
        frames.  Such violations are normally logged via a kernel warning.
        If this is set to TRUE, the kernel will not give such warnings, which
        will avoid log file clutter.
-       Default: FALSE
+       Default: 1
 
 icmp_errors_use_inbound_ifaddr - BOOLEAN
 
index 1c2dab4096252bd54fc839f5ec435f161f94a537..5cc6005877785a6c1f305d32011175506a32d5e8 100644 (file)
@@ -54,7 +54,7 @@ it will use an allocated socket buffer as usual and the contents will be
  copied to the ring on transmission, nullifying most of the performance gains.
 Dumps of kernel databases automatically support memory mapped I/O.
 
-Conversion of the transmit path involves changing message contruction to
+Conversion of the transmit path involves changing message construction to
 use memory from the TX ring instead of (usually) a buffer declared on the
 stack and setting up the frame header appropriately. Optionally poll() can
 be used to wait for free frames in the TX ring.
@@ -65,8 +65,8 @@ Structured and definitions for using memory mapped I/O are contained in
 RX and TX rings
 ----------------
 
-Each ring contains a number of continous memory blocks, containing frames of
-fixed size dependant on the parameters used for ring setup.
+Each ring contains a number of continuous memory blocks, containing frames of
+fixed size dependent on the parameters used for ring setup.
 
 Ring:  [ block 0 ]
                [ frame 0 ]
@@ -80,7 +80,7 @@ Ring: [ block 0 ]
                [ frame 2 * n + 1 ]
 
 The blocks are only visible to the kernel, from the point of view of user-space
-the ring just contains the frames in a continous memory zone.
+the ring just contains the frames in a continuous memory zone.
 
 The ring parameters used for setting up the ring are defined as follows:
 
@@ -91,7 +91,7 @@ struct nl_mmap_req {
        unsigned int    nm_frame_nr;
 };
 
-Frames are grouped into blocks, where each block is a continous region of memory
+Frames are grouped into blocks, where each block is a continuous region of memory
 and holds nm_block_size / nm_frame_size frames. The total number of frames in
 the ring is nm_frame_nr. The following invariants hold:
 
@@ -113,8 +113,8 @@ Some parameters are constrained, specifically:
 
 - nm_frame_nr must equal the actual number of frames as specified above.
 
-When the kernel can't allocate phsyically continous memory for a ring block,
-it will fall back to use physically discontinous memory. This might affect
+When the kernel can't allocate physically continuous memory for a ring block,
+it will fall back to using physically discontinuous memory. This might affect
 performance negatively, in order to avoid this the nm_frame_size parameter
 should be chosen to be as small as possible for the required frame size and
 the number of blocks should be increased instead.
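
As a rough illustration of the constraints above, the sketch below fills in a
struct nl_mmap_req whose block size is a multiple of the page size and whose
frame size divides the block size, then requests an RX ring.  The
NETLINK_RX_RING option and SOL_NETLINK level are assumed to be available from
the memory mapped netlink API documented in this file; the concrete sizes are
examples only, not recommendations.

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* Sketch only: request an RX ring that honours the invariants above.
 * fd is an already created netlink socket; error handling is left out.
 */
static int setup_rx_ring(int fd)
{
	unsigned int frame_size = 2048;              /* multiple of NL_MMAP_MSG_ALIGNMENT */
	unsigned int block_size = 4 * getpagesize(); /* multiple of the page size */
	unsigned int block_nr   = 64;
	struct nl_mmap_req req = {
		.nm_block_size = block_size,
		.nm_block_nr   = block_nr,
		.nm_frame_size = frame_size,             /* divides nm_block_size */
		.nm_frame_nr   = block_nr * (block_size / frame_size),
	};

	return setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
}

A TX ring would presumably be requested the same way using the corresponding
TX ring option.
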
@@ -274,9 +274,9 @@ This example assumes some ring parameters of the ring setup are available.
                        /* Get next frame header */
                        hdr = rx_ring + frame_offset;
 
-                       if (hdr->nm_status == NL_MMAP_STATUS_VALID)
+                       if (hdr->nm_status == NL_MMAP_STATUS_VALID) {
                                /* Regular memory mapped frame */
-                               nlh = (void *hdr) + NL_MMAP_HDRLEN;
+                               nlh = (void *)hdr + NL_MMAP_HDRLEN;
                                len = hdr->nm_len;
 
                                /* Release empty message immediately. May happen
index 23dd80e82b8e36e6cf1dfdc595cad845b8885ad1..8572796b1eb64f97560d63a410a972cbcced08fc 100644 (file)
@@ -704,6 +704,12 @@ So it seems to be a good candidate to be used with packet fanout.
 Minimal example code by Daniel Borkmann based on Chetan Loke's lolpcap (compile
 it with gcc -Wall -O2 blob.c, and try things like "./a.out eth0", etc.):
 
+/* Written from scratch, but kernel-to-user space API usage
+ * dissected from lolpcap:
+ *  Copyright 2011, Chetan Loke <loke.chetan@gmail.com>
+ *  License: GPL, version 2.0
+ */
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
@@ -722,27 +728,6 @@ it with gcc -Wall -O2 blob.c, and try things like "./a.out eth0", etc.):
 #include <linux/if_ether.h>
 #include <linux/ip.h>
 
-#define BLOCK_SIZE             (1 << 22)
-#define FRAME_SIZE             2048
-
-#define NUM_BLOCKS             64
-#define NUM_FRAMES             ((BLOCK_SIZE * NUM_BLOCKS) / FRAME_SIZE)
-
-#define BLOCK_RETIRE_TOV_IN_MS 64
-#define BLOCK_PRIV_AREA_SZ     13
-
-#define ALIGN_8(x)             (((x) + 8 - 1) & ~(8 - 1))
-
-#define BLOCK_STATUS(x)                ((x)->h1.block_status)
-#define BLOCK_NUM_PKTS(x)      ((x)->h1.num_pkts)
-#define BLOCK_O2FP(x)          ((x)->h1.offset_to_first_pkt)
-#define BLOCK_LEN(x)           ((x)->h1.blk_len)
-#define BLOCK_SNUM(x)          ((x)->h1.seq_num)
-#define BLOCK_O2PRIV(x)                ((x)->offset_to_priv)
-#define BLOCK_PRIV(x)          ((void *) ((uint8_t *) (x) + BLOCK_O2PRIV(x)))
-#define BLOCK_HDR_LEN          (ALIGN_8(sizeof(struct block_desc)))
-#define BLOCK_PLUS_PRIV(sz_pri)        (BLOCK_HDR_LEN + ALIGN_8((sz_pri)))
-
 #ifndef likely
 # define likely(x)             __builtin_expect(!!(x), 1)
 #endif
@@ -765,7 +750,7 @@ struct ring {
 static unsigned long packets_total = 0, bytes_total = 0;
 static sig_atomic_t sigint = 0;
 
-void sighandler(int num)
+static void sighandler(int num)
 {
        sigint = 1;
 }
@@ -774,6 +759,8 @@ static int setup_socket(struct ring *ring, char *netdev)
 {
        int err, i, fd, v = TPACKET_V3;
        struct sockaddr_ll ll;
+       unsigned int blocksiz = 1 << 22, framesiz = 1 << 11;
+       unsigned int blocknum = 64;
 
        fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
@@ -788,13 +775,12 @@ static int setup_socket(struct ring *ring, char *netdev)
        }
 
        memset(&ring->req, 0, sizeof(ring->req));
-       ring->req.tp_block_size = BLOCK_SIZE;
-       ring->req.tp_frame_size = FRAME_SIZE;
-       ring->req.tp_block_nr = NUM_BLOCKS;
-       ring->req.tp_frame_nr = NUM_FRAMES;
-       ring->req.tp_retire_blk_tov = BLOCK_RETIRE_TOV_IN_MS;
-       ring->req.tp_sizeof_priv = BLOCK_PRIV_AREA_SZ;
-       ring->req.tp_feature_req_word |= TP_FT_REQ_FILL_RXHASH;
+       ring->req.tp_block_size = blocksiz;
+       ring->req.tp_frame_size = framesiz;
+       ring->req.tp_block_nr = blocknum;
+       ring->req.tp_frame_nr = (blocksiz * blocknum) / framesiz;
+       ring->req.tp_retire_blk_tov = 60;
+       ring->req.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
 
        err = setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &ring->req,
                         sizeof(ring->req));
@@ -804,8 +790,7 @@ static int setup_socket(struct ring *ring, char *netdev)
        }
 
        ring->map = mmap(NULL, ring->req.tp_block_size * ring->req.tp_block_nr,
-                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
-                        fd, 0);
+                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, 0);
        if (ring->map == MAP_FAILED) {
                perror("mmap");
                exit(1);
@@ -835,58 +820,6 @@ static int setup_socket(struct ring *ring, char *netdev)
        return fd;
 }
 
-#ifdef __checked
-static uint64_t prev_block_seq_num = 0;
-
-void assert_block_seq_num(struct block_desc *pbd)
-{
-       if (unlikely(prev_block_seq_num + 1 != BLOCK_SNUM(pbd))) {
-               printf("prev_block_seq_num:%"PRIu64", expected seq:%"PRIu64" != "
-                      "actual seq:%"PRIu64"\n", prev_block_seq_num,
-                      prev_block_seq_num + 1, (uint64_t) BLOCK_SNUM(pbd));
-               exit(1);
-       }
-
-       prev_block_seq_num = BLOCK_SNUM(pbd);
-}
-
-static void assert_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
-{
-       if (BLOCK_NUM_PKTS(pbd)) {
-               if (unlikely(bytes != BLOCK_LEN(pbd))) {
-                       printf("block:%u with %upackets, expected len:%u != actual len:%u\n",
-                              block_num, BLOCK_NUM_PKTS(pbd), bytes, BLOCK_LEN(pbd));
-                       exit(1);
-               }
-       } else {
-               if (unlikely(BLOCK_LEN(pbd) != BLOCK_PLUS_PRIV(BLOCK_PRIV_AREA_SZ))) {
-                       printf("block:%u, expected len:%lu != actual len:%u\n",
-                              block_num, BLOCK_HDR_LEN, BLOCK_LEN(pbd));
-                       exit(1);
-               }
-       }
-}
-
-static void assert_block_header(struct block_desc *pbd, const int block_num)
-{
-       uint32_t block_status = BLOCK_STATUS(pbd);
-
-       if (unlikely((block_status & TP_STATUS_USER) == 0)) {
-               printf("block:%u, not in TP_STATUS_USER\n", block_num);
-               exit(1);
-       }
-
-       assert_block_seq_num(pbd);
-}
-#else
-static inline void assert_block_header(struct block_desc *pbd, const int block_num)
-{
-}
-static void assert_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
-{
-}
-#endif
-
 static void display(struct tpacket3_hdr *ppd)
 {
        struct ethhdr *eth = (struct ethhdr *) ((uint8_t *) ppd + ppd->tp_mac);
@@ -916,37 +849,27 @@ static void display(struct tpacket3_hdr *ppd)
 
 static void walk_block(struct block_desc *pbd, const int block_num)
 {
-       int num_pkts = BLOCK_NUM_PKTS(pbd), i;
+       int num_pkts = pbd->h1.num_pkts, i;
        unsigned long bytes = 0;
-       unsigned long bytes_with_padding = BLOCK_PLUS_PRIV(BLOCK_PRIV_AREA_SZ);
        struct tpacket3_hdr *ppd;
 
-       assert_block_header(pbd, block_num);
-
-       ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + BLOCK_O2FP(pbd));
+       ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd +
+                                      pbd->h1.offset_to_first_pkt);
        for (i = 0; i < num_pkts; ++i) {
                bytes += ppd->tp_snaplen;
-               if (ppd->tp_next_offset)
-                       bytes_with_padding += ppd->tp_next_offset;
-               else
-                       bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac);
-
                display(ppd);
 
-               ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset);
-               __sync_synchronize();
+               ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd +
+                                              ppd->tp_next_offset);
        }
 
-       assert_block_len(pbd, bytes_with_padding, block_num);
-
        packets_total += num_pkts;
        bytes_total += bytes;
 }
 
-void flush_block(struct block_desc *pbd)
+static void flush_block(struct block_desc *pbd)
 {
-       BLOCK_STATUS(pbd) = TP_STATUS_KERNEL;
-       __sync_synchronize();
+       pbd->h1.block_status = TP_STATUS_KERNEL;
 }
 
 static void teardown_socket(struct ring *ring, int fd)
@@ -962,7 +885,7 @@ int main(int argc, char **argp)
        socklen_t len;
        struct ring ring;
        struct pollfd pfd;
-       unsigned int block_num = 0;
+       unsigned int block_num = 0, blocks = 64;
        struct block_desc *pbd;
        struct tpacket_stats_v3 stats;
 
@@ -984,15 +907,15 @@ int main(int argc, char **argp)
 
        while (likely(!sigint)) {
                pbd = (struct block_desc *) ring.rd[block_num].iov_base;
-retry_block:
-               if ((BLOCK_STATUS(pbd) & TP_STATUS_USER) == 0) {
+
+               if ((pbd->h1.block_status & TP_STATUS_USER) == 0) {
                        poll(&pfd, 1, -1);
-                       goto retry_block;
+                       continue;
                }
 
                walk_block(pbd, block_num);
                flush_block(pbd);
-               block_num = (block_num + 1) % NUM_BLOCKS;
+               block_num = (block_num + 1) % blocks;
        }
 
        len = sizeof(stats);
index 579994afbe067bf9bf6d79bf50c62986dda2765d..ca6977f5b2ed066f49823c0d7c0129a9a16b0820 100644 (file)
@@ -163,6 +163,64 @@ and unnecessary. If there are fewer hardware queues than CPUs, then
 RPS might be beneficial if the rps_cpus for each queue are the ones that
 share the same memory domain as the interrupting CPU for that queue.
 
+==== RPS Flow Limit
+
+RPS scales kernel receive processing across CPUs without introducing
+reordering. The trade-off to sending all packets from the same flow
+to the same CPU is CPU load imbalance if flows vary in packet rate.
+In the extreme case a single flow dominates traffic. Especially on
+common server workloads with many concurrent connections, such
+behavior indicates a problem such as a misconfiguration or spoofed
+source Denial of Service attack.
+
+Flow Limit is an optional RPS feature that prioritizes small flows
+during CPU contention by dropping packets from large flows slightly
+ahead of those from small flows. It is active only when an RPS or RFS
+destination CPU approaches saturation.  Once a CPU's input packet
+queue exceeds half the maximum queue length (as set by sysctl
+net.core.netdev_max_backlog), the kernel starts a per-flow packet
+count over the last 256 packets. If a flow exceeds a set ratio (by
+default, half) of these packets when a new packet arrives, then the
+new packet is dropped. Packets from other flows are still only
+dropped once the input packet queue reaches netdev_max_backlog.
+No packets are dropped when the input packet queue length is below
+the threshold, so flow limit does not sever connections outright:
+even large flows maintain connectivity.
+
+== Interface
+
+Flow limit is compiled in by default (CONFIG_NET_FLOW_LIMIT), but not
+turned on. It is implemented for each CPU independently (to avoid lock
+and cache contention) and toggled per CPU by setting the relevant bit
+in sysctl net.core.flow_limit_cpu_bitmap. It exposes the same CPU
+bitmap interface as rps_cpus (see above) when called from procfs:
+
+ /proc/sys/net/core/flow_limit_cpu_bitmap
+
+Per-flow rate is calculated by hashing each packet into a hashtable
+bucket and incrementing a per-bucket counter. The hash function is
+the same one that selects a CPU in RPS, but as the number of buckets can
+be much larger than the number of CPUs, flow limit has finer-grained
+identification of large flows and fewer false positives. The default
+table has 4096 buckets. This value can be modified through sysctl
+
+ net.core.flow_limit_table_len
+
+The value is only consulted when a new table is allocated. Modifying
+it does not update active tables.
+
+== Suggested Configuration
+
+Flow limit is useful on systems with many concurrent connections,
+where a single connection taking up 50% of a CPU indicates a problem.
+In such environments, enable the feature on all CPUs that handle
+network rx interrupts (as set in /proc/irq/N/smp_affinity).
+
+The feature depends on the input packet queue length exceeding
+the flow limit threshold (50%) plus the flow history length (256).
+Setting net.core.netdev_max_backlog to either 1000 or 10000
+performed well in experiments.
+
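
As a hedged sketch of the interface above (the procfs paths are the ones named
in this section; the CPU mask and backlog value are illustrative only):

#include <stdio.h>

/* Sketch only: enable flow limit on CPUs 0-3 and give the input queue enough
 * headroom for the 50% threshold plus the 256-packet history.  Needs root.
 */
static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	fclose(f);
	return 0;
}

int main(void)
{
	/* bitmap "f" selects CPUs 0-3 */
	write_sysctl("/proc/sys/net/core/flow_limit_cpu_bitmap", "f");
	/* one of the backlog values suggested above */
	write_sysctl("/proc/sys/net/core/netdev_max_backlog", "10000");
	return 0;
}
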
 
 RFS: Receive Flow Steering
 ==========================
index 504dfe4d52eba3541cfa6330d52595d37265a0b7..a66c9821b5cefa5056f2fb202a9acb00a207f8af 100644 (file)
@@ -268,7 +268,7 @@ situations.
 System Power Management Phases
 ------------------------------
 Suspending or resuming the system is done in several phases.  Different phases
-are used for standby or memory sleep states ("suspend-to-RAM") and the
+are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
 hibernation state ("suspend-to-disk").  Each phase involves executing callbacks
 for every device before the next phase begins.  Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks.  The
@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one.
 
 Entering System Suspend
 -----------------------
-When the system goes into the standby or memory sleep state, the phases are:
+When the system goes into the freeze, standby or memory sleep state,
+the phases are:
 
                prepare, suspend, suspend_late, suspend_noirq.
 
@@ -368,7 +369,7 @@ the devices that were suspended.
 
 Leaving System Suspend
 ----------------------
-When resuming from standby or memory sleep, the phases are:
+When resuming from freeze, standby or memory sleep, the phases are:
 
                resume_noirq, resume_early, resume, complete.
 
@@ -433,8 +434,8 @@ the system log.
 
 Entering Hibernation
 --------------------
-Hibernating the system is more complicated than putting it into the standby or
-memory sleep state, because it involves creating and saving a system image.
+Hibernating the system is more complicated than putting it into the other
+sleep states, because it involves creating and saving a system image.
 Therefore there are more phases for hibernation, with a different set of
 callbacks.  These phases always run after tasks have been frozen and memory has
 been freed.
@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state.
 
 At this point the system image is saved, and the devices then need to be
 prepared for the upcoming system shutdown.  This is much like suspending them
-before putting the system into the standby or memory sleep state, and the phases
-are similar.
+before putting the system into the freeze, standby or memory sleep state,
+and the phases are similar.
 
     9. The prepare phase is discussed above.
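
To relate the phase names above to driver code, the following is a hedged
sketch of a struct dev_pm_ops supplying one callback per system sleep phase;
the foo_* names and empty bodies are placeholders, and a real driver would
normally implement only the phases it needs.

#include <linux/device.h>

/* Placeholder callbacks: a real driver would quiesce and restore its
 * hardware in these.
 */
static int foo_prepare(struct device *dev)       { return 0; }
static int foo_suspend(struct device *dev)       { return 0; }
static int foo_suspend_late(struct device *dev)  { return 0; }
static int foo_suspend_noirq(struct device *dev) { return 0; }
static int foo_resume_noirq(struct device *dev)  { return 0; }
static int foo_resume_early(struct device *dev)  { return 0; }
static int foo_resume(struct device *dev)        { return 0; }
static void foo_complete(struct device *dev)     { }

static const struct dev_pm_ops foo_pm_ops = {
	.prepare       = foo_prepare,
	.suspend       = foo_suspend,
	.suspend_late  = foo_suspend_late,
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq  = foo_resume_noirq,
	.resume_early  = foo_resume_early,
	.resume        = foo_resume,
	.complete      = foo_complete,
};
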
 
index c537834af00566e0fee8058a9643803d2c2b3954..f1f0f59a7c47d594a9753207d713d0834a0e99b1 100644 (file)
@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs
 is mounted at /sys). 
 
 /sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'standby'
-(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+returns what states are supported, which is hard-coded to 'freeze',
+'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
 (Suspend-to-Disk). 
 
 Writing to this file one of those strings causes the system to
index c2a4a346c0d98597d3375c5a9f160c012fded709..a81fa254303de0c57ef6e7741dd76ffb0bcfdf02 100644 (file)
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose.
 The subsystems or drivers having such needs can register suspend notifiers that
 will be called upon the following events by the PM core:
 
-PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will
-                       be frozen immediately.
+PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen
+                       immediately. This is different from PM_SUSPEND_PREPARE
+                       below because here we do additional work between notifiers
+                       and drivers freezing.
 
 PM_POST_HIBERNATION    The system memory state has been restored from a
                        hibernation image or an error occurred during
index 4416b28630df8d62cbba43e2fedb0cee42d6b3b1..442d43df9b251111df2d677b5e8cc3ef4488c831 100644 (file)
@@ -2,12 +2,26 @@
 System Power Management States
 
 
-The kernel supports three power management states generically, though
-each is dependent on platform support code to implement the low-level
-details for each state. This file describes each state, what they are
+The kernel supports four power management states; one is purely generic,
+while the other three depend on platform support code to implement the
+low-level details for each state.
+This file describes each state, what they are
 commonly called, what ACPI state they map to, and what string to write
 to /sys/power/state to enter that state
 
+State:         Freeze / Low-Power Idle
+ACPI State:    S0
+String:                "freeze"
+
+This state is a generic, pure software, light-weight, low-power state.
+It allows more energy to be saved relative to idle by freezing user
+space and putting all I/O devices into low-power states (possibly
+lower-power than available at run time), such that the processors can
+spend more time in their idle states.
+This state can be used for platforms without Standby/Suspend-to-RAM
+support, or it can be used in addition to Suspend-to-RAM (memory sleep)
+to provide reduced resume latency.
+
 
 State:         Standby / Power-On Suspend
 ACPI State:    S1
@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which
 also offers low power savings, but low resume latency. Not all devices
 support D1, and those that don't are left on. 
 
-A transition from Standby to the On state should take about 1-2
-seconds. 
-
 
 State:         Suspend-to-RAM
 ACPI State:    S3
@@ -42,9 +53,6 @@ transition back to the On state.
 For at least ACPI, STR requires some minimal boot-strapping code to
 resume the system from STR. This may be true on other platforms. 
 
-A transition from Suspend-to-RAM to the On state should take about
-3-5 seconds. 
-
 
 State:         Suspend-to-disk
 ACPI State:    S4
@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering
 down offers greater savings, and allows this mechanism to work on any
 system. However, entering a real low-power state allows the user to
 trigger wake up events (e.g. pressing a key or opening a laptop lid).
-
-A transition from Suspend-to-Disk to the On state should take about 30
-seconds, though it's typically a bit more with the current
-implementation. 
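
Putting this together, a minimal user-space sketch that requests one of the
states listed above by writing its string to /sys/power/state (requires
sufficient privilege; "freeze" here is only an example):

#include <fcntl.h>
#include <unistd.h>

/* Sketch: write one of the supported state strings to /sys/power/state. */
int main(void)
{
	int fd = open("/sys/power/state", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "freeze", 6) != 6) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
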
index c907be41d60f9bbae7e93eddfc4d80b46dcc2386..dc23e58ae2641a0ae876d2a1840530ae7e4ec59d 100644 (file)
@@ -147,6 +147,25 @@ Example signal handler:
       fix_the_problem(ucp->dar);
     }
 
+When in an active transaction that takes a signal, we need to be careful with
+the stack.  It's possible that the stack has moved back up after the tbegin.
+The obvious case here is when the tbegin is called inside a function that
+returns before a tend.  In this case, the stack is part of the checkpointed
+transactional memory state.  If we write over this non-transactionally or in
+suspended mode, we are in trouble because if we get a tm abort, the program
+counter and stack pointer will be back at the tbegin but our in-memory stack
+won't be valid anymore.
+
+To avoid this, when taking a signal in an active transaction, we need to use
+the stack pointer from the checkpointed state, rather than the speculated
+state.  This ensures that the signal context (written tm suspended) will be
+written below the stack required for the rollback.  The transaction is aborted
+because of the treclaim, so any memory written between the tbegin and the
+signal will be rolled back anyway.
+
+For signals taken in non-TM or suspended mode, we use the
+normal/non-checkpointed stack pointer.
+
 
 Failure cause codes used by kernel
 ==================================
@@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the
 kernel aborted a transaction:
 
  TM_CAUSE_RESCHED       Thread was rescheduled.
+ TM_CAUSE_TLBI          Software TLB invalidate.
  TM_CAUSE_FAC_UNAV      FP/VEC/VSX unavailable trap.
  TM_CAUSE_SYSCALL       Currently unused; future syscalls that must abort
                         transactions for consistency will use this.
  TM_CAUSE_SIGNAL        Signal delivered.
  TM_CAUSE_MISC          Currently unused.
+ TM_CAUSE_ALIGNMENT     Alignment fault.
+ TM_CAUSE_EMULATE       Emulation that touched memory.
 
-These can be checked by the user program's abort handler as TEXASR[0:7].
-
+These can be checked by the user program's abort handler as TEXASR[0:7].  If
+bit 7 is set, it indicates that the error is considered persistent.  For example
+a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.
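
For illustration, an abort handler might inspect the failure code roughly as
sketched below.  This assumes GCC's HTM builtins (compiled with -mhtm) and is
only a sketch of the TEXASR[0:7] check described above, not a canonical
implementation.

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch: read the upper word of TEXASR, take the failure code from
 * TEXASR[0:7] (the top byte), and test bit 7, which the text above describes
 * as the persistent-failure indication.
 */
static void report_tm_failure(void)
{
	uint32_t texasru = __builtin_get_texasru();
	unsigned int cause = texasru >> 24;	/* TEXASR bits 0:7 */

	printf("TM failure cause 0x%02x (%s)\n", cause,
	       (cause & 0x01) ? "persistent" : "transient");
}
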
 
 GDB
 ===
index c75694b35d08b7f6f70290a1b715658f4d6c7156..a9c16c979da215d35d774e8e739848fe535acdd1 100644 (file)
@@ -79,20 +79,63 @@ master port that is used to communicate with devices within the network.
 In order to initialize the RapidIO subsystem, a platform must initialize and
 register at least one master port within the RapidIO network. To register mport
 within the subsystem controller driver initialization code calls function
-rio_register_mport() for each available master port. After all active master
-ports are registered with a RapidIO subsystem, the rio_init_mports() routine
-is called to perform enumeration and discovery.
+rio_register_mport() for each available master port.
 
-In the current PowerPC-based implementation a subsys_initcall() is specified to
-perform controller initialization and mport registration. At the end it directly
-calls rio_init_mports() to execute RapidIO enumeration and discovery.
+RapidIO subsystem uses subsys_initcall() or device_initcall() to perform
+controller initialization (depending on controller device type).
+
+After all active master ports are registered with a RapidIO subsystem,
+an enumeration and/or discovery routine may be called automatically or
+by user-space command.
 
 4. Enumeration and Discovery
 ----------------------------
 
-When rio_init_mports() is called it scans a list of registered master ports and
-calls an enumeration or discovery routine depending on the configured role of a
-master port: host or agent.
+4.1 Overview
+------------
+
+RapidIO subsystem configuration options allow users to specify enumeration and
+discovery methods as statically linked components or loadable modules.
+An enumeration/discovery method implementation and available input parameters
+define how any given method can be attached to available RapidIO mports:
+simply to all available mports OR individually to the specified mport device.
+
+Depending on selected enumeration/discovery build configuration, there are
+several methods to initiate an enumeration and/or discovery process:
+
+  (a) Statically linked enumeration and discovery process can be started
+  automatically during kernel initialization time using corresponding module
+  parameters. This was the original method used since introduction of RapidIO
+  subsystem. Now this method relies on enumerator module parameter which is
+  'rio-scan.scan' for existing basic enumeration/discovery method.
+  When automatic start of enumeration/discovery is used a user has to ensure
+  that all discovering endpoints are started before the enumerating endpoint
+  and are waiting for enumeration to be completed.
+  Configuration option CONFIG_RAPIDIO_DISC_TIMEOUT defines time that discovering
+  endpoint waits for enumeration to be completed. If the specified timeout
+  expires the discovery process is terminated without obtaining RapidIO network
+  information. NOTE: a timed out discovery process may be restarted later using
+  a user-space command as it is described later if the given endpoint was
+  enumerated successfully.
+
+  (b) Statically linked enumeration and discovery process can be started by
+  a command from user space. This initiation method provides more flexibility
+  for a system startup compared to the option (a) above. After all participating
+  endpoints have been successfully booted, an enumeration process shall be
+  started first by issuing a user-space command, after an enumeration is
+  completed a discovery process can be started on all remaining endpoints.
+
+  (c) Modular enumeration and discovery process can be started by a command from
+  user space. After an enumeration/discovery module is loaded, a network scan
+  process can be started by issuing a user-space command.
+  Similar to the option (b) above, an enumerator has to be started first.
+
+  (d) Modular enumeration and discovery process can be started by a module
+  initialization routine. In this case an enumerating module shall be loaded
+  first.
+
+When a network scan process is started it calls an enumeration or discovery
+routine depending on the configured role of a master port: host or agent.
 
 Enumeration is performed by a master port if it is configured as a host port by
 assigning a host device ID greater than or equal to zero. A host device ID is
@@ -104,8 +147,58 @@ for it.
 The enumeration and discovery routines use RapidIO maintenance transactions
 to access the configuration space of devices.
 
-The enumeration process is implemented according to the enumeration algorithm
-outlined in the RapidIO Interconnect Specification: Annex I [1].
+4.2 Automatic Start of Enumeration and Discovery
+------------------------------------------------
+
+The automatic enumeration/discovery start method is applicable only when the
+enumeration/discovery method is built into the kernel. To enable automatic
+start by the existing basic enumerator method, use the boot command line
+parameter "rio-scan.scan=1".
+
+This configuration requires synchronized start of all RapidIO endpoints that
+form a network which will be enumerated/discovered. Discovering endpoints have
+to be started before an enumeration starts to ensure that all RapidIO
+controllers have been initialized and are ready to be discovered. The
+configuration parameter CONFIG_RAPIDIO_DISC_TIMEOUT defines the time
+(in seconds) that a discovering endpoint will wait for enumeration to be
+completed.
+
+When automatic enumeration/discovery start is selected, the basic method's
+initialization routine calls rio_init_mports() to perform enumeration or
+discovery for all known mport devices.
+
+Depending on RapidIO network size and configuration, this automatic
+enumeration/discovery start method may be difficult to use due to the
+requirement for synchronized start of all endpoints.
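+
+For illustration, a boot command line fragment that requests automatic start
+of the basic enumerator might look as follows (the other parameters are
+placeholders; only "rio-scan.scan=1" is relevant here):
+
+    console=ttyS0,115200 root=/dev/sda1 rio-scan.scan=1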
+
+4.3 User-space Start of Enumeration and Discovery
+-------------------------------------------------
+
+User-space start of enumeration and discovery can be used with both built-in
+and modular build configurations. For user-space controlled start, the RapidIO
+subsystem creates the write-only sysfs attribute file '/sys/bus/rapidio/scan'.
+To initiate an enumeration or discovery process on a specific mport device, a
+user needs to write its mport_ID (not its RapidIO destination ID) into that
+file. The mport_ID is a sequential number (0 ... RIO_MAX_MPORTS) assigned
+during mport device registration. For example, on a machine with a single
+RapidIO controller, the mport_ID for that controller will always be 0.
+
+To initiate RapidIO enumeration/discovery on all available mports, a user may
+write '-1' (or RIO_MPORT_ANY) into the scan attribute file.
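+
+For example, the following shell commands (the mport numbers are illustrative)
+start enumeration on mport 0 of the enumerating endpoint and a scan of all
+mports on another endpoint:
+
+    # on the enumerating (host) endpoint
+    echo 0 > /sys/bus/rapidio/scan
+
+    # on a discovering (agent) endpoint
+    echo -1 > /sys/bus/rapidio/scan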
+
+4.4 Basic Enumeration Method
+----------------------------
+
+This is the original enumeration/discovery method, available since the first
+release of the RapidIO subsystem code. The enumeration process is implemented
+according to the enumeration algorithm outlined in the RapidIO Interconnect
+Specification: Annex I [1].
+
+This method can be configured as statically linked or as a loadable module.
+The method's single parameter, "scan", allows the enumeration/discovery
+process to be triggered from the module initialization routine.
+
+This enumeration/discovery method can be started only once and does not support
+unloading if it is built as a module.
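+
+When built as a module, the basic enumerator can, for example, be loaded and
+started in one step using its "scan" parameter (the module name rio-scan
+follows from the parameter prefix used above):
+
+    modprobe rio-scan scan=1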
 
 The enumeration process traverses the network using a recursive depth-first
 algorithm. When a new device is found, the enumerator takes ownership of that
@@ -160,6 +253,19 @@ time period. If this wait time period expires before enumeration is completed,
 an agent skips RapidIO discovery and continues with remaining kernel
 initialization.
 
+4.5 Adding New Enumeration/Discovery Method
+-------------------------------------------
+
+RapidIO subsystem code organization allows addition of new enumeration/discovery
+methods as new configuration options without significant impact to the core
+RapidIO code.
+
+A new enumeration/discovery method has to be attached to one or more mport
+devices before an enumeration/discovery process can be started. Normally,
+the method's module initialization routine calls rio_register_scan() to attach
+an enumerator to a specified mport device (or devices). The basic enumerator
+implementation demonstrates this process.
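+
+A minimal sketch of such an initialization routine is shown below. The exact
+prototypes of rio_register_scan() and the scan operations structure should be
+taken from include/linux/rio.h; the callback names used here are placeholders:
+
+    #include <linux/module.h>
+    #include <linux/rio.h>
+
+    /* placeholder callbacks implementing the new method */
+    static int my_enumerate(struct rio_mport *mport, u32 flags)
+    {
+            /* enumerate the network attached to this mport */
+            return 0;
+    }
+
+    static int my_discover(struct rio_mport *mport, u32 flags)
+    {
+            /* discover a network enumerated by another host */
+            return 0;
+    }
+
+    static struct rio_scan my_scan_ops = {
+            .enumerate = my_enumerate,
+            .discover  = my_discover,
+    };
+
+    static int __init my_scan_init(void)
+    {
+            /* attach this method to all available mport devices */
+            return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
+    }
+    module_init(my_scan_init);
+
+    MODULE_LICENSE("GPL");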
+
 5. References
 -------------
 
index 97f71ce575d65c08652788c95746d47d975e02d0..19878179da4c78657967868fc36ce2abe35aae94 100644 (file)
@@ -88,3 +88,20 @@ that exports additional attributes.
 
 IDT_GEN2:
  errlog - reads contents of device error log until it is empty.
+
+
+5. RapidIO Bus Attributes
+-------------------------
+
+RapidIO bus subdirectory /sys/bus/rapidio implements the following bus-specific
+attribute:
+
+  scan - allows the enumeration/discovery process to be triggered from user
+        space. This is a write-only attribute. To initiate an enumeration or
+        discovery process on a specific mport device, a user needs to write
+        its mport_ID (not its RapidIO destination ID) into this file. The
+        mport_ID is a sequential number (0 ... RIO_MAX_MPORTS) assigned to the
+        mport device. For example, for a machine with a single RapidIO
+        controller, the mport_ID for that controller will always be 0.
+        To initiate RapidIO enumeration/discovery on all available mports,
+        a user must write '-1' (or RIO_MPORT_ANY) into this attribute file.
index 98335b7a533795607ccedcfb5d3975a45acba0fe..e658bbfb641fa643ce64be3c8515bff5f71a44a0 100644 (file)
@@ -26,7 +26,7 @@ Table : Subdirectories in /proc/sys/net
  ipv4      IP version 4        x25        X.25 protocol
  ipx       IPX                 token-ring IBM token ring
  bridge    Bridging            decnet     DEC net
- ipv6      IP version 6
+ ipv6      IP version 6        tipc       TIPC
 ..............................................................................
 
 1. /proc/sys/net/core - Network core options
@@ -50,6 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
 it's a Per-CPU variable.
 Default: 64
 
+low_latency_read
+----------------
+Low latency busy poll timeout for socket reads (requires CONFIG_NET_LL_RX_POLL).
+Approximate time in microseconds (us) to spin waiting for packets on the
+device queue. This sets the default value of the SO_LL socket option and can
+be overridden per socket by setting SO_LL explicitly.
+The recommended value is 50. May increase power usage.
+Default: 0 (off)
+
+low_latency_poll
+----------------
+Low latency busy poll timeout for poll and select (requires
+CONFIG_NET_LL_RX_POLL). Approximate time in microseconds (us) to spin waiting
+for packets on the device queue. The recommended value depends on the number
+of sockets you poll on: for a few sockets use 50, for several hundred use 100.
+For more than that you probably want to use epoll.
+Note that only sockets with SO_LL set will be busy polled, so you want to
+either set SO_LL selectively on those sockets or set the net.core
+low_latency_read sysctl globally.
+May increase power usage.
+Default: 0 (off)
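+
+For example, a 50 us busy-poll default for socket reads can be enabled at run
+time (the value shown is only the recommended starting point):
+
+    echo 50 > /proc/sys/net/core/low_latency_read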
+
 rmem_default
 ------------
 
@@ -93,8 +114,7 @@ netdev_budget
 
 Maximum number of packets taken from all interfaces in one polling cycle (NAPI
 poll). In one polling cycle interfaces which are registered to polling are
-probed in a round-robin manner. The limit of packets in one such probe can be
-set per-device via sysfs class/net/<device>/weight .
+probed in a round-robin manner.
 
 netdev_max_backlog
 ------------------
@@ -201,3 +221,18 @@ IPX.
 The /proc/net/ipx_route  table  holds  a list of IPX routes. For each route it
 gives the  destination  network, the router node (or Directly) and the network
 address of the router (or Connected) for internal networks.
+
+6. TIPC
+-------------------------------------------------------
+
+The TIPC protocol now has a tunable for the receive memory, similar to the
+tcp_rmem - i.e. a vector of 3 INTEGERs: (min, default, max)
+
+    # cat /proc/sys/net/tipc/tipc_rmem
+    4252725 34021800        68043600
+    #
+
+The max value is set to CONN_OVERLOAD_LIMIT, and the default and min values
+are scaled (shifted) versions of that same value.  Note that the min value
+is not currently used in any meaningful way, but the triplet is preserved
+to stay consistent with things like tcp_rmem.
index e5069c22339197fd3fad9d25518aa6be971b5c03..60d6a33935001388f038948c025d56ddefb50be9 100644 (file)
@@ -2895,8 +2895,8 @@ F:        drivers/media/dvb-frontends/ec100*
 
 ECRYPT FILE SYSTEM
 M:     Tyler Hicks <tyhicks@canonical.com>
-M:     Dustin Kirkland <dustin.kirkland@gazzang.com>
 L:     ecryptfs@vger.kernel.org
+W:     http://ecryptfs.org
 W:     https://launchpad.net/ecryptfs
 S:     Supported
 F:     Documentation/filesystems/ecryptfs.txt
@@ -3327,11 +3327,12 @@ F:      drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-M:     Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+M:     Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
+M:     Tomi Valkeinen <tomi.valkeinen@ti.com>
 L:     linux-fbdev@vger.kernel.org
 W:     http://linux-fbdev.sourceforge.net/
 Q:     http://patchwork.kernel.org/project/linux-fbdev/list/
-T:     git git://github.com/schandinat/linux-2.6.git fbdev-next
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
 S:     Maintained
 F:     Documentation/fb/
 F:     Documentation/devicetree/bindings/fb/
@@ -3870,9 +3871,16 @@ M:       K. Y. Srinivasan <kys@microsoft.com>
 M:     Haiyang Zhang <haiyangz@microsoft.com>
 L:     devel@linuxdriverproject.org
 S:     Maintained
-F:     drivers/hv/
+F:     arch/x86/include/asm/mshyperv.h
+F:     arch/x86/include/uapi/asm/hyperv.h
+F:     arch/x86/kernel/cpu/mshyperv.c
 F:     drivers/hid/hid-hyperv.c
+F:     drivers/hv/
 F:     drivers/net/hyperv/
+F:     drivers/scsi/storvsc_drv.c
+F:     drivers/video/hyperv_fb.c
+F:     include/linux/hyperv.h
+F:     tools/hv/
 
 I2C OVER PARALLEL PORT
 M:     Jean Delvare <khali@linux-fr.org>
@@ -4445,6 +4453,16 @@ S:       Maintained
 F:     drivers/scsi/*iscsi*
 F:     include/scsi/*iscsi*
 
+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
+M:     Or Gerlitz <ogerlitz@mellanox.com>
+M:     Roi Dayan <roid@mellanox.com>
+L:     linux-rdma@vger.kernel.org
+S:     Supported
+W:     http://www.openfabrics.org
+W:     www.open-iscsi.org
+Q:     http://patchwork.kernel.org/project/linux-rdma/list/
+F:     drivers/infiniband/ulp/iser
+
 ISDN SUBSYSTEM
 M:     Karsten Keil <isdn@linux-pingi.de>
 L:     isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -4646,12 +4664,13 @@ F:      include/linux/sunrpc/
 F:     include/uapi/linux/sunrpc/
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:     Marcelo Tosatti <mtosatti@redhat.com>
 M:     Gleb Natapov <gleb@redhat.com>
+M:     Paolo Bonzini <pbonzini@redhat.com>
 L:     kvm@vger.kernel.org
-W:     http://kvm.qumranet.com
+W:     http://linux-kvm.org
 S:     Supported
-F:     Documentation/*/kvm.txt
+F:     Documentation/*/kvm*.txt
+F:     Documentation/virtual/kvm/
 F:     arch/*/kvm/
 F:     arch/*/include/asm/kvm*
 F:     include/linux/kvm*
@@ -4981,6 +5000,13 @@ S:       Maintained
 F:     Documentation/hwmon/lm90
 F:     drivers/hwmon/lm90.c
 
+LM95234 HARDWARE MONITOR DRIVER
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     lm-sensors@lm-sensors.org
+S:     Maintained
+F:     Documentation/hwmon/lm95234
+F:     drivers/hwmon/lm95234.c
+
 LME2510 MEDIA DRIVER
 M:     Malcolm Priestley <tvboxspy@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -5514,18 +5540,18 @@ F:      Documentation/networking/s2io.txt
 F:     Documentation/networking/vxge.txt
 F:     drivers/net/ethernet/neterion/
 
-NETFILTER/IPTABLES/IPCHAINS
-P:     Harald Welte
-P:     Jozsef Kadlecsik
+NETFILTER/IPTABLES
 M:     Pablo Neira Ayuso <pablo@netfilter.org>
 M:     Patrick McHardy <kaber@trash.net>
+M:     Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:     netfilter-devel@vger.kernel.org
 L:     netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://1984.lsi.us.es/nf
-T:     git git://1984.lsi.us.es/nf-next
+Q:     http://patchwork.ozlabs.org/project/netfilter-devel/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -5745,7 +5771,7 @@ M:        Matthew Wilcox <willy@linux.intel.com>
 L:     linux-nvme@lists.infradead.org
 T:     git git://git.infradead.org/users/willy/linux-nvme.git
 S:     Supported
-F:     drivers/block/nvme.c
+F:     drivers/block/nvme*
 F:     include/linux/nvme.h
 
 OMAP SUPPORT
@@ -6074,9 +6100,18 @@ L:       linux-parisc@vger.kernel.org
 W:     http://www.parisc-linux.org/
 Q:     http://patchwork.kernel.org/project/linux-parisc/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 S:     Maintained
 F:     arch/parisc/
+F:     Documentation/parisc/
 F:     drivers/parisc/
+F:     drivers/char/agp/parisc-agp.c
+F:     drivers/input/serio/gscps2.c
+F:     drivers/parport/parport_gsc.*
+F:     drivers/tty/serial/8250/8250_gsc.c
+F:     drivers/video/sti*
+F:     drivers/video/console/sti*
+F:     drivers/video/logo/logo_parisc*
 
 PC87360 HARDWARE MONITORING DRIVER
 M:     Jim Cromie <jim.cromie@gmail.com>
@@ -7594,7 +7629,7 @@ F:        drivers/clk/spear/
 SPI SUBSYSTEM
 M:     Mark Brown <broonie@kernel.org>
 M:     Grant Likely <grant.likely@linaro.org>
-L:     spi-devel-general@lists.sourceforge.net
+L:     linux-spi@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
 Q:     http://patchwork.kernel.org/project/spi-devel-general/list/
 S:     Maintained
@@ -7859,7 +7894,7 @@ L:        linux-scsi@vger.kernel.org
 L:     target-devel@vger.kernel.org
 L:     http://groups.google.com/group/linux-iscsi-target-dev
 W:     http://www.linux-iscsi.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 S:     Supported
 F:     drivers/target/
 F:     include/target/
@@ -8187,6 +8222,13 @@ F:       drivers/mmc/host/sh_mobile_sdhi.c
 F:     include/linux/mmc/tmio.h
 F:     include/linux/mmc/sh_mobile_sdhi.h
 
+TMP401 HARDWARE MONITOR DRIVER
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     lm-sensors@lm-sensors.org
+S:     Maintained
+F:     Documentation/hwmon/tmp401
+F:     drivers/hwmon/tmp401.c
+
 TMPFS (SHMEM FILESYSTEM)
 M:     Hugh Dickins <hughd@google.com>
 L:     linux-mm@kvack.org
@@ -8967,7 +9009,7 @@ S:        Maintained
 F:     drivers/net/wireless/wl3501*
 
 WM97XX TOUCHSCREEN DRIVERS
-M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:     Mark Brown <broonie@kernel.org>
 M:     Liam Girdwood <lrg@slimlogic.co.uk>
 L:     linux-input@vger.kernel.org
 T:     git git://opensource.wolfsonmicro.com/linux-2.6-touch
@@ -8977,7 +9019,6 @@ F:        drivers/input/touchscreen/*wm97*
 F:     include/linux/wm97xx.h
 
 WOLFSON MICROELECTRONICS DRIVERS
-M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
 L:     patches@opensource.wolfsonmicro.com
 T:     git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:     git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
index cd11e88576044e127cb73f49bd41c7d4fb5d3e9f..c6863b55f7c7a48d53338d51d6c5a6b711cbadc1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index dd0e8eb8042f746cc7072c0bf67d1fac8c0d4a7e..a4429bcd609ec8a3761e064d200d0646c3cae5f1 100644 (file)
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
        bool
 
+config GENERIC_IDLE_POLL_SETUP
+       bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
        bool
index eee6ea76bdaff2d5bd2e07583e4e0259e595cda9..4885825e498d754b6efb891d1e28a9e9f8536dbc 100644 (file)
@@ -81,4 +81,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _UAPI_ASM_SOCKET_H */
index c0fd3623c39387d5f2fbdc1da3552f08ff13e4da..0fa0d4abe79557cf8338c92ddf2a58a3e9d9d61c 100644 (file)
@@ -37,7 +37,7 @@
 
        soc100 {
                uart@FF100000 {
-                       pinctrl-names = "abilis,simple-default";
+                       pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
                ethernet@FE100000 {
index 6f8c381f62685102891817da925d6b6b8aed9827..a4d80ce283aec1e7df6ca447fadc2a31b72f8b2c 100644 (file)
@@ -37,7 +37,7 @@
 
        soc100 {
                uart@FF100000 {
-                       pinctrl-names = "abilis,simple-default";
+                       pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
                ethernet@FE100000 {
index a6139fc5aaa3e2f6faefbc7fdd5e369611697145..b97e3051ba4bfcd43db2bd66c00df1815a87090b 100644 (file)
@@ -88,8 +88,7 @@
                };
 
                uart@FF100000 {
-                       compatible = "snps,dw-apb-uart",
-                                       "abilis,simple-pinctrl";
+                       compatible = "snps,dw-apb-uart";
                        reg = <0xFF100000 0x100>;
                        clock-frequency = <166666666>;
                        interrupts = <25 1>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        cell-index = <1>;
-                       compatible = "abilis,tb100-spi",
-                                       "abilis,simple-pinctrl";
+                       compatible = "abilis,tb100-spi";
                        num-cs = <2>;
                        reg = <0xFE011000 0x20>;
                        interrupt-parent = <&tb10x_ictl>;
index 9f841af41092f059a604c0631984bb3c6ef067d7..ef62682e8d9567670fc435be9bc5ddfddb5fa06b 100644 (file)
@@ -93,14 +93,16 @@ static inline int cache_is_vipt_aliasing(void)
 #endif
 }
 
-#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
 
 /*
  * checks if two addresses (after page aligning) index into same cache set
  */
 #define addr_not_cache_congruent(addr1, addr2)                         \
+({                                                                     \
        cache_is_vipt_aliasing() ?                                      \
-               (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0          \
+               (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0;         \
+})
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
 do {                                                                   \
index 374a35514116c17a71a610c2959e90d213e7ed97..ab84bf131fe135a2070c4a22bc5c177020787759 100644 (file)
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
 
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-#define clear_user_page(addr, vaddr, pg)       clear_page(addr)
-#define copy_user_page(vto, vfrom, vaddr, pg)  copy_page(vto, vfrom)
-
-#else  /* VIPT aliasing dcache */
-
 struct vm_area_struct;
 struct page;
 
@@ -35,8 +28,6 @@ void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long u_vaddr, struct vm_area_struct *vma);
 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
 
-#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
-
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
index 1cc4720faccbecf7862253720988f54e9b43751a..95b1522212a73fce42cd1a7a5c18546c61232346 100644 (file)
@@ -57,9 +57,9 @@
 
 #define _PAGE_ACCESSED      (1<<1)     /* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)     /* Page is cached (H) */
-#define _PAGE_EXECUTE       (1<<3)     /* Page has user execute perm (H) */
-#define _PAGE_WRITE         (1<<4)     /* Page has user write perm (H) */
-#define _PAGE_READ          (1<<5)     /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE     (1<<3)     /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE       (1<<4)     /* Page has user write perm (H) */
+#define _PAGE_U_READ        (1<<5)     /* Page has user read perm (H) */
 #define _PAGE_K_EXECUTE     (1<<6)     /* Page has kernel execute perm (H) */
 #define _PAGE_K_WRITE       (1<<7)     /* Page has kernel write perm (H) */
 #define _PAGE_K_READ        (1<<8)     /* Page has kernel perm (H) */
@@ -72,9 +72,9 @@
 
 /* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)     /* Page is cached (H) */
-#define _PAGE_EXECUTE       (1<<1)     /* Page has user execute perm (H) */
-#define _PAGE_WRITE         (1<<2)     /* Page has user write perm (H) */
-#define _PAGE_READ          (1<<3)     /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE     (1<<1)     /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE       (1<<2)     /* Page has user write perm (H) */
+#define _PAGE_U_READ        (1<<3)     /* Page has user read perm (H) */
 #define _PAGE_K_EXECUTE     (1<<4)     /* Page has kernel execute perm (H) */
 #define _PAGE_K_WRITE       (1<<5)     /* Page has kernel write perm (H) */
 #define _PAGE_K_READ        (1<<6)     /* Page has kernel perm (H) */
@@ -93,7 +93,8 @@
 #endif
 
 /* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+                       _PAGE_GLOBAL | _PAGE_PRESENT)
 
 #ifdef CONFIG_ARC_CACHE_PAGES
 #define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
  * -by default cached, unless config otherwise
  * -present in memory
  */
-#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+
+#define _PAGE_READ     (_PAGE_U_READ    | _PAGE_K_READ)
+#define _PAGE_WRITE    (_PAGE_U_WRITE   | _PAGE_K_WRITE)
+#define _PAGE_EXECUTE  (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
  * kernel vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
-#define PAGE_KERNEL          __pgprot(___DEF | _PAGE_GLOBAL)
+#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
 
 /* ioremap */
-#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
-                                                    _PAGE_GLOBAL)
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
index 85b6df839bd7b93b11465c066bdd063b32e68b77..cb0c708ca6654cd38d0d73f40e6f43135e34c01e 100644 (file)
@@ -16,7 +16,7 @@
 /* Masks for actual TLB "PD"s */
 #define PTE_BITS_IN_PD0        (_PAGE_GLOBAL | _PAGE_PRESENT)
 #define PTE_BITS_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE | \
-                        _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+                        _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
                         _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
 
 #ifndef __ASSEMBLY__
index 2f12bca8aef30c4155b21e514e3ba9dd5ec63468..aedce1905441cffb1958f00af149b0a939740371 100644 (file)
@@ -610,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 
        local_irq_save(flags);
        __ic_line_inv_vaddr(paddr, vaddr, len);
-       __dc_line_op(paddr, vaddr, len, OP_FLUSH);
+       __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
        local_irq_restore(flags);
 }
 
@@ -676,6 +676,17 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
        flush_cache_all();
 }
 
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                    unsigned long u_vaddr)
+{
+       /* TBD: do we really need to clear the kernel mapping */
+       __flush_dcache_page(page_address(page), u_vaddr);
+       __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 void copy_user_highpage(struct page *to, struct page *from,
        unsigned long u_vaddr, struct vm_area_struct *vma)
 {
@@ -725,16 +736,6 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
        set_bit(PG_arch_1, &page->flags);
 }
 
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
-                    unsigned long u_vaddr)
-{
-       /* TBD: do we really need to clear the kernel mapping */
-       __flush_dcache_page(page_address(page), u_vaddr);
-       __flush_dcache_page(page_address(page), page_address(page));
-
-}
-
-#endif
 
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
index 066145b5f3488bcaa515769728d0389a00fc7ed7..fe1c5a073afe4cf996d28b6486c6b296fbd94344 100644 (file)
@@ -444,7 +444,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
         *             so userspace sees the right data.
         *  (Avoids the flush for Non-exec + congruent mapping case)
         */
-       if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
+       if ((vma->vm_flags & VM_EXEC) ||
+            addr_not_cache_congruent(paddr, vaddr)) {
                struct page *page = pfn_to_page(pte_pfn(*ptep));
 
                int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
index 9df765dc7c3abb56755cac6591aa5765ca155587..3357d26ffe54267a8ba9d26e596019b187e23bdd 100644 (file)
@@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI
        ;----------------------------------------------------------------
        ; VERIFY_PTE: Check if PTE permissions approp for executing code
        cmp_s   r2, VMALLOC_START
-       mov.lo  r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+       mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE)
        mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
 
        and     r3, r0, r2  ; Mask out NON Flag bits from PTE
@@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD
        mov_s   r2, 0
        lr      r3, [ecr]
        btst_s  r3, ECR_C_BIT_DTLB_LD_MISS      ; Read Access
-       or.nz   r2, r2, _PAGE_READ              ; chk for Read flag in PTE
+       or.nz   r2, r2, _PAGE_U_READ            ; chk for Read flag in PTE
        btst_s  r3, ECR_C_BIT_DTLB_ST_MISS      ; Write Access
-       or.nz   r2, r2, _PAGE_WRITE             ; chk for Write flag in PTE
+       or.nz   r2, r2, _PAGE_U_WRITE           ; chk for Write flag in PTE
        ; Above laddering takes care of XCHG access
        ;   which is both Read and Write
 
index d3567691c7e1259174a9b6db4cf65593c2fb7536..06cb309294608a6753652049e7c4f3df6d8f8ada 100644 (file)
@@ -34,31 +34,6 @@ static void __init tb10x_platform_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static void __init tb10x_platform_late_init(void)
-{
-       struct device_node *dn;
-
-       /*
-        * Pinctrl documentation recommends setting up the iomux here for
-        * all modules which don't require control over the pins themselves.
-        * Modules which need this kind of assistance are compatible with
-        * "abilis,simple-pinctrl", i.e. we can easily iterate over them.
-        * TODO: Does this recommended method work cleanly with pins required
-        * by modules?
-        */
-       for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") {
-               struct platform_device *pd = of_find_device_by_node(dn);
-               struct pinctrl *pctl;
-
-               pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default");
-               if (IS_ERR(pctl)) {
-                       int ret = PTR_ERR(pctl);
-                       dev_err(&pd->dev, "Could not set up pinctrl: %d\n",
-                               ret);
-               }
-       }
-}
-
 static const char *tb10x_compat[] __initdata = {
        "abilis,arc-tb10x",
        NULL,
@@ -67,5 +42,4 @@ static const char *tb10x_compat[] __initdata = {
 MACHINE_START(TB10x, "tb10x")
        .dt_compat      = tb10x_compat,
        .init_machine   = tb10x_platform_init,
-       .init_late      = tb10x_platform_late_init,
 MACHINE_END
index d423d58f938dc40fb5b3c01445b9184572631b10..49d993cee51232874a81814fe39ca99e09b88bad 100644 (file)
@@ -38,6 +38,7 @@ config ARM
        select HAVE_GENERIC_HARDIRQS
        select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
        select HAVE_IDE if PCI || ISA || PCMCIA
+       select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
@@ -488,7 +489,7 @@ config ARCH_IXP4XX
 config ARCH_DOVE
        bool "Marvell Dove"
        select ARCH_REQUIRE_GPIOLIB
-       select CPU_V7
+       select CPU_PJ4
        select GENERIC_CLOCKEVENTS
        select MIGHT_HAVE_PCI
        select PINCTRL
index 47374085befdf6d4ad1cc49c0714e52453ec510f..1ba358ba16b871aec3b366cab9b4e4066048e69c 100644 (file)
@@ -309,7 +309,7 @@ define archhelp
   echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
   echo  '* xipImage      - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
   echo  '  uImage        - U-Boot wrapped zImage'
-  echo  '  bootpImage    - Combined zImage and initial RAM disk' 
+  echo  '  bootpImage    - Combined zImage and initial RAM disk'
   echo  '                  (supply initrd image via make variable INITRD=<path>)'
   echo  '* dtbs          - Build device tree blobs for enabled boards'
   echo  '  install       - Install uncompressed kernel'
index 3580d57ea21841285bc687d928f39268a325edc1..79e9bdbfc491a29521939aa2747862fc491c6d6d 100644 (file)
@@ -124,7 +124,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all -DZIMAGE
+asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
index 6e8382d5b7a4d31418a934565a473b823ffe5b6f..5392ee63338fac3453f30b125366e03241158133 100644 (file)
@@ -1,6 +1,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
 #include CONFIG_DEBUG_LL_INCLUDE
 
 ENTRY(putc)
@@ -10,3 +12,29 @@ ENTRY(putc)
        busyuart r3, r1
        mov      pc, lr
 ENDPROC(putc)
+
+#else
+
+ENTRY(putc)
+       adr     r1, 1f
+       ldmia   r1, {r2, r3}
+       add     r2, r2, r1
+       ldr     r1, [r2, r3]
+       strb    r0, [r1]
+       mov     r0, #0x03               @ SYS_WRITEC
+   ARM(        svc     #0x123456       )
+ THUMB(        svc     #0xab           )
+       mov     pc, lr
+       .align  2
+1:     .word   _GLOBAL_OFFSET_TABLE_ - .
+       .word   semi_writec_buf(GOT)
+ENDPROC(putc)
+
+       .bss
+       .global semi_writec_buf
+       .type   semi_writec_buf, %object
+semi_writec_buf:
+       .space  4
+       .size   semi_writec_buf, 4
+
+#endif
index 6179d94dd5c665a634e5e2913a916c96c9652c84..3115e313d9f65a31ad746b7d8d282741a5b1f765 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/mach-types.h>
 
                .section        ".start", "ax"
+               .arch   armv4
 
 __SA1100_start:
 
index 089c560e07f13947ed68ab446edcc9de40c30604..92b56897ed64014037b48eb0e677bb633d646be1 100644 (file)
@@ -18,6 +18,7 @@
        
                .section        ".start", "ax"
 
+               .arch armv4
                b       __beginning
        
 __ofw_data:    .long   0                               @ the number of memory blocks
index fe4d9c3ad761c8dfaadce6e214d709417947f396..032a8d987148b6a24c97d7ec05467bef14b82ab0 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
+       .arch   armv7-a
 /*
  * Debugging stuff
  *
@@ -805,8 +806,8 @@ call_cache_fn:      adr     r12, proc_types
                .align  2
                .type   proc_types,#object
 proc_types:
-               .word   0x00000000              @ old ARM ID
-               .word   0x0000f000
+               .word   0x41000000              @ old ARM ID
+               .word   0xff00f000
                mov     pc, lr
  THUMB(                nop                             )
                mov     pc, lr
index b9f7121e6ecf02c561e5b1b10308659aad23fb28..f0895c581a89be8668a99db10e6873ae94be0cef 100644 (file)
@@ -177,7 +177,9 @@ dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \
        spear320-evb.dtb \
        spear320-hmi.dtb
 dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \
+dtb-$(CONFIG_ARCH_SUNXI) += \
+       sun4i-a10-cubieboard.dtb \
+       sun4i-a10-mini-xplus.dtb \
        sun4i-a10-hackberry.dtb \
        sun5i-a13-olinuxino.dtb
 dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
index 5302f79c05b719e3317bb0e3431c144073691724..4b5a8e065dcb60c577bcb138de40ab8141558f1c 100644 (file)
 
 &cpsw_emac0 {
        phy_id = <&davinci_mdio>, <0>;
+       phy-mode = "mii";
 };
 
 &cpsw_emac1 {
        phy_id = <&davinci_mdio>, <1>;
+       phy-mode = "mii";
 };
index 0423298a26fe634e174b4138cac7ee2ee28f6a2c..814ee037fd51fbb9a81774b2c4ba5e23fda6ae3a 100644 (file)
 
 &cpsw_emac0 {
        phy_id = <&davinci_mdio>, <0>;
+       phy-mode = "rgmii-txid";
 };
 
 &cpsw_emac1 {
        phy_id = <&davinci_mdio>, <1>;
+       phy-mode = "rgmii-txid";
 };
index f67c360844f482f20338c9b18b5ff0b76f2de542..4297899a24708a30e13e7cf5e24b99a591fb0eeb 100644 (file)
                };
        };
 };
+
+&cpsw_emac0 {
+       phy_id = <&davinci_mdio>, <0>;
+       phy-mode = "rgmii-txid";
+};
+
+&cpsw_emac1 {
+       phy_id = <&davinci_mdio>, <1>;
+       phy-mode = "rgmii-txid";
+};
index 1460d9b88adfee928d8c150ae56812abbb2b2d28..8e1248f01fab0638e3872cef25371b1a660872ea 100644 (file)
                        ti,hwmods = "gpmc";
                        reg = <0x50000000 0x2000>;
                        interrupts = <100>;
-                       num-cs = <7>;
-                       num-waitpins = <2>;
+                       gpmc,num-cs = <7>;
+                       gpmc,num-waitpins = <2>;
                        #address-cells = <2>;
                        #size-cells = <1>;
                        status = "disabled";
index 272bbc65fab05b809b9df0a57497c14d37eac302..550eb772c30e4c47c2f7f0896469a033dee3e23b 100644 (file)
@@ -33,7 +33,8 @@
                #size-cells = <1>;
                compatible = "simple-bus";
                interrupt-parent = <&mpic>;
-               ranges = <0 0 0xd0000000 0x100000>;
+               ranges = <0          0 0xd0000000 0x0100000 /* internal registers */
+                         0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>;
 
                internal-regs {
                        compatible = "simple-bus";
index b2c1b5af9749cd0f0b358d32dfbb159c7deab280..aee2b1866ce2ede35fbd58a6bbea1e29ac6b16ea 100644 (file)
@@ -29,7 +29,8 @@
        };
 
        soc {
-               ranges = <0 0xd0000000 0x100000>;
+               ranges = <0          0xd0000000 0x0100000 /* internal registers */
+                         0xe0000000 0xe0000000 0x8100000 /* PCIe */>;
                internal-regs {
                        system-controller@18200 {
                                compatible = "marvell,armada-370-xp-system-controller";
 
                        L2: l2-cache {
                                compatible = "marvell,aurora-outer-cache";
-                               reg = <0xd0008000 0x1000>;
+                               reg = <0x08000 0x1000>;
                                cache-id-part = <0x100>;
                                wt-override;
                        };
 
-                       mpic: interrupt-controller@20000 {
+                       interrupt-controller@20000 {
                                reg = <0x20a00 0x1d0>, <0x21870 0x58>;
                        };
 
index 26ad06fc147ed78f9446bb5119a1e6e001867dba..76db557adbe7bf36b43265019ec99c1870b90e26 100644 (file)
        };
 
        soc {
+               ranges = <0          0 0xd0000000 0x100000  /* Internal registers 1MiB */
+                         0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
+                         0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB  */>;
+
                internal-regs {
                        serial@12000 {
                                clock-frequency = <250000000>;
index f14d36c4615984399b71c36e920ed84be96fcb74..fdea75c73411997bcb68ab86bc1ccd349a2caadd 100644 (file)
        };
 
        soc {
+               ranges = <0          0 0xd0000000 0x100000      /* Internal registers 1MiB */
+                         0xe0000000 0 0xe0000000 0x8100000     /* PCIe */
+                         0xf0000000 0 0xf0000000 0x8000000     /* Device Bus, NOR 128MiB   */>;
+
                internal-regs {
                        serial@12000 {
                                clock-frequency = <250000000>;
index bacab11c10dc8151eb7f2b6eb8cf0d2905a52f0e..5b902f9a3af29a84fd0ee83000c8d0f2ffa280ea 100644 (file)
@@ -31,7 +31,7 @@
                                wt-override;
                        };
 
-                       mpic: interrupt-controller@20000 {
+                       interrupt-controller@20000 {
                              reg = <0x20a00 0x2d0>, <0x21070 0x58>;
                        };
 
index 70b5ccbac234a63d12228e161e2620d4e2d8e40a..84c4bef2d7268760a6d927bd8ed2fdf7d547ea59 100644 (file)
                                                atmel,pins =
                                                        <0 10 0x2 0x0   /* PA10 periph B */
                                                         0 11 0x2 0x0   /* PA11 periph B */
-                                                        0 24 0x2 0x0   /* PA24 periph B */
+                                                        0 22 0x2 0x0   /* PA22 periph B */
                                                         0 25 0x2 0x0   /* PA25 periph B */
                                                         0 26 0x2 0x0   /* PA26 periph B */
                                                         0 27 0x2 0x0   /* PA27 periph B */
index 3de8e6dfbcb150baa7251d1803990724d93e4fe2..8d25f889928eccd3c7a3440dd9522e4f1d34e37c 100644 (file)
@@ -57,6 +57,7 @@
                                compatible = "atmel,at91rm9200-aic";
                                interrupt-controller;
                                reg = <0xfffff000 0x200>;
+                               atmel,external-irqs = <31>;
                        };
 
                        ramc0: ramc@ffffe800 {
index 3b40d11d65e70c0daabdeddafe27180cacd12825..315250b4995e74e4452af0d0e46bb847790fbaf5 100644 (file)
@@ -11,7 +11,7 @@
 /include/ "at91sam9x5ek.dtsi"
 
 / {
-       model = "Atmel AT91SAM9G25-EK";
+       model = "Atmel AT91SAM9X25-EK";
        compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
 
        ahb {
index f0052dccf9a86858325bcaa98d9c34d8beec4e37..1e12aeff403b018cf174ff1b710af391f970c997 100644 (file)
@@ -44,6 +44,7 @@
                        reg = <0x7e201000 0x1000>;
                        interrupts = <2 25>;
                        clock-frequency = <3000000>;
+                       arm,primecell-periphid = <0x00241011>;
                };
 
                gpio: gpio {
index 98dfc3ea5c0bee94a80a08da8c5d994d84125977..0673524238a61f706c7c17a096ef3319fb095287 100644 (file)
                clock-names = "usbhost";
        };
 
+       usbphy@12130000 {
+               compatible = "samsung,exynos5250-usb2phy";
+               reg = <0x12130000 0x100>;
+               clocks = <&clock 1>, <&clock 285>;
+               clock-names = "ext_xtal", "usbhost";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               usbphy-sys {
+                       reg = <0x10040704 0x8>,
+                             <0x10050230 0x4>;
+               };
+       };
+
        amba {
                #address-cells = <1>;
                #size-cells = <1>;
index d2550e0bca24e2468892fb7bd9fdfafdf0bbb45c..701153992c695bb5455c8cdfc13d1a796f3ceebd 100644 (file)
                                #size-cells = <0>;
                                compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
                                reg = <0x43fa4000 0x4000>;
-                               clocks = <&clks 62>;
-                               clock-names = "ipg";
+                               clocks = <&clks 62>, <&clks 62>;
+                               clock-names = "ipg", "per";
                                interrupts = <14>;
                                status = "disabled";
                        };
                                compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
                                reg = <0x50004000 0x4000>;
                                interrupts = <0>;
-                               clocks = <&clks 80>;
-                               clock-names = "ipg";
+                               clocks = <&clks 80>, <&clks 80>;
+                               clock-names = "ipg", "per";
                                status = "disabled";
                        };
 
                                #size-cells = <0>;
                                compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
                                reg = <0x50010000 0x4000>;
-                               clocks = <&clks 79>;
-                               clock-names = "ipg";
+                               clocks = <&clks 79>, <&clks 79>;
+                               clock-names = "ipg", "per";
                                interrupts = <13>;
                                status = "disabled";
                        };
index ff4bd4873edf269aa8bbfb51ca2466a3d13112be..75bd11386516df223cec5e77cb9692a1545d9d7e 100644 (file)
                                compatible = "fsl,imx27-cspi";
                                reg = <0x1000e000 0x1000>;
                                interrupts = <16>;
-                               clocks = <&clks 53>, <&clks 0>;
+                               clocks = <&clks 53>, <&clks 53>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
                                compatible = "fsl,imx27-cspi";
                                reg = <0x1000f000 0x1000>;
                                interrupts = <15>;
-                               clocks = <&clks 52>, <&clks 0>;
+                               clocks = <&clks 52>, <&clks 52>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
                                compatible = "fsl,imx27-cspi";
                                reg = <0x10017000 0x1000>;
                                interrupts = <6>;
-                               clocks = <&clks 51>, <&clks 0>;
+                               clocks = <&clks 51>, <&clks 51>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
index 21bb786c5b31ebf090bab7a8152921606d7aa852..53fdde69bbf4a1254d4119629398a6f4c64ec81b 100644 (file)
                                compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
                                reg = <0x83fc0000 0x4000>;
                                interrupts = <38>;
-                               clocks = <&clks 55>, <&clks 0>;
+                               clocks = <&clks 55>, <&clks 55>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
index 845982eaac22facc6210f699d13f9665edd53319..eb83aa039b8b91bd44655322bb19a850f2b26039 100644 (file)
                                compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
                                reg = <0x63fc0000 0x4000>;
                                interrupts = <38>;
-                               clocks = <&clks 55>, <&clks 0>;
+                               clocks = <&clks 55>, <&clks 55>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
index 82a404da1c0d9531b632e265943632e7ea48d468..99ba6e14ebf3f5880660e3073aebda1fdce3e53b 100644 (file)
                usb_otg_hs: usb_otg_hs@480ab000 {
                        compatible = "ti,omap3-musb";
                        reg = <0x480ab000 0x1000>;
-                       interrupts = <0 92 0x4>, <0 93 0x4>;
+                       interrupts = <92>, <93>;
                        interrupt-names = "mc", "dma";
                        ti,hwmods = "usb_otg_hs";
                        multipoint = <1>;
index 03bd60deb52b8faf6e5043eb1a346739c60b1c43..eeb734e257096b3d1e13b651e25e95576fe875fb 100644 (file)
        };
 };
 
+&omap4_pmx_wkup {
+       pinctrl-names = "default";
+       pinctrl-0 = <
+                       &twl6030_wkup_pins
+       >;
+
+       twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+               pinctrl-single,pins = <
+                       0x14 0x2        /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+               >;
+       };
+};
+
 &omap4_pmx_core {
        pinctrl-names = "default";
        pinctrl-0 = <
+                       &twl6030_pins
                        &twl6040_pins
                        &mcpdm_pins
                        &mcbsp1_pins
                        &tpd12s015_pins
        >;
 
+       twl6030_pins: pinmux_twl6030_pins {
+               pinctrl-single,pins = <
+                       0x15e 0x4118    /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+               >;
+       };
+
        twl6040_pins: pinmux_twl6040_pins {
                pinctrl-single,pins = <
                        0xe0 0x3        /* hdq_sio.gpio_127 OUTPUT | MODE3 */
index a35d9cd5806317dfa5cfc82e3f648fec38711822..98505a2ef1622afecb200b05907076411219dc6b 100644 (file)
        };
 };
 
+&omap4_pmx_wkup {
+       pinctrl-names = "default";
+       pinctrl-0 = <
+                       &twl6030_wkup_pins
+       >;
+
+       twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+               pinctrl-single,pins = <
+                       0x14 0x2        /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+               >;
+       };
+};
+
 &omap4_pmx_core {
        pinctrl-names = "default";
        pinctrl-0 = <
+                       &twl6030_pins
                        &twl6040_pins
                        &mcpdm_pins
                        &dmic_pins
                >;
        };
 
+       twl6030_pins: pinmux_twl6030_pins {
+               pinctrl-single,pins = <
+                       0x15e 0x4118    /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+               >;
+       };
+
        twl6040_pins: pinmux_twl6040_pins {
                pinctrl-single,pins = <
                        0xe0 0x3        /* hdq_sio.gpio_127 OUTPUT | MODE3 */
index 3dd7ff825828630ef96ac5ddc2f7f95d532c1242..635cae2830112906abe4bb6fd7058a58fa95d8b9 100644 (file)
                        interrupts = <0 41 0x4>;
                        ti,hwmods = "timer5";
                        ti,timer-dsp;
+                       ti,timer-pwm;
                };
 
                timer6: timer@4013a000 {
                        reg = <0x4803e000 0x80>;
                        interrupts = <0 45 0x4>;
                        ti,hwmods = "timer9";
+                       ti,timer-pwm;
                };
 
                timer10: timer@48086000 {
                        reg = <0x48086000 0x80>;
                        interrupts = <0 46 0x4>;
                        ti,hwmods = "timer10";
+                       ti,timer-pwm;
                };
 
                timer11: timer@48088000 {
index 2e643ea51cceba014b713843a81a1f11a4fd88be..5000e0d428496d8105f6157f4ba54b4603796657 100644 (file)
                                compatible = "atmel,at91sam9x5-spi";
                                reg = <0xf0004000 0x100>;
                                interrupts = <24 4 3>;
-                               cs-gpios = <&pioD 13 0
-                                           &pioD 14 0 /* conflicts with SCK0 and CANRX0 */
-                                           &pioD 15 0 /* conflicts with CTS0 and CANTX0 */
-                                           &pioD 16 0 /* conflicts with RTS0 and PWMFI3 */
-                                          >;
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_spi0>;
                                status = "disabled";
                        };
 
                        macb0: ethernet@f0028000 {
-                               compatible = "cnds,pc302-gem", "cdns,gem";
+                               compatible = "cdns,pc302-gem", "cdns,gem";
                                reg = <0xf0028000 0x100>;
                                interrupts = <34 4 3>;
                                pinctrl-names = "default";
                                compatible = "atmel,at91sam9x5-spi";
                                reg = <0xf8008000 0x100>;
                                interrupts = <25 4 3>;
-                               cs-gpios = <&pioC 25 0
-                                           &pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */
-                                           &pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */
-                                           &pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */
-                                          >;
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_spi1>;
                                status = "disabled";
index 1f8ed404626cdb1edba6825a49c90d7a435d81aa..b336e7787cb3ea35ac312e83a66bc948a988e977 100644 (file)
 
        ahb {
                apb {
+                       spi0: spi@f0004000 {
+                               cs-gpios = <&pioD 13 0>, <0>, <0>, <0>;
+                       };
+
                        macb0: ethernet@f0028000 {
                                phy-mode = "rgmii";
                        };
index b28fbf3408e3b29c265db2f7448649819fe85df1..6f82d9368948856e8123b7193bbfc8230986f96f 100644 (file)
                bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
        };
 
+       /* This is where the interrupt is routed on the S8815 board */
+       external-bus@34000000 {
+               ethernet@300 {
+                       interrupt-parent = <&gpio3>;
+                       interrupts = <8 0x1>;
+               };
+       };
+
        /* Custom board node with GPIO pins to active etc */
        usb-s8815 {
                /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */
                ethernet-gpio {
-                       gpios = <&gpio3 19 0x1>;
-                       interrupts = <19 0x1>;
-                       interrupt-parent = <&gpio3>;
+                       gpios = <&gpio3 8 0x1>;
                };
                /* This will bias the MMC/SD card detect line */
                mmcsd-gpio {
index b70fe0db6bb7583cd541d25b7573a057caec130d..e752b570b4ecdf9c36f12b183137d9ce70271a05 100644 (file)
        };
 
        soc@01c20000 {
+               emac: ethernet@01c0b000 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&emac_pins_a>;
+                       phy = <&phy1>;
+                       status = "okay";
+               };
+
+               mdio@01c0b080 {
+                       status = "okay";
+
+                       phy1: ethernet-phy@1 {
+                               reg = <1>;
+                       };
+               };
+
                pinctrl@01c20800 {
                        led_pins_cubieboard: led_pins@0 {
                                allwinner,pins = "PH20", "PH21";
index b9efac100c85b2aac70c41a5bb4c9b8d83a11a11..3514b37d66bcd180df6e963a5d816e3446ef9379 100644 (file)
        };
 
        soc@01c20000 {
+               emac: ethernet@01c0b000 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&emac_pins_a>;
+                       phy = <&phy0>;
+                       status = "okay";
+               };
+
+               mdio@01c0b080 {
+                       phy-supply = <&reg_emac_3v3>;
+                       status = "okay";
+
+                       phy0: ethernet-phy@0 {
+                               reg = <0>;
+                       };
+               };
+
+               pio: pinctrl@01c20800 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&hackberry_hogs>;
+
+                       hackberry_hogs: hogs@0 {
+                               allwinner,pins = "PH19";
+                               allwinner,function = "gpio_out";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
+               };
+
                uart0: serial@01c28000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart0_pins_a>;
                        status = "okay";
                };
        };
+
+       regulators {
+               compatible = "simple-bus";
+
+               reg_emac_3v3: emac-3v3 {
+                       compatible = "regulator-fixed";
+                       regulator-name = "emac-3v3";
+                       regulator-min-microvolt = <3300000>;
+                       regulator-max-microvolt = <3300000>;
+                       enable-active-high;
+                       gpio = <&pio 7 19 0>;
+               };
+       };
 };
index 4a7c35d6726aaa1290e4ee42b6ac47f517fc159a..078ed7f618d7910cc6030fdf9b288b3b124da065 100644 (file)
@@ -22,8 +22,8 @@
                bootargs = "earlyprintk console=ttyS0,115200";
        };
 
-       soc {
-               uart0: uart@01c28000 {
+       soc@01c20000 {
+               uart0: serial@01c28000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart0_pins_a>;
                        status = "okay";
index e7ef619a70a2531440400086cdac4ee6ae3b71eb..983da33bebaaa0d2914629a01ef03922263de439 100644 (file)
                reg = <0x01c20000 0x300000>;
                ranges;
 
+               emac: ethernet@01c0b000 {
+                       compatible = "allwinner,sun4i-emac";
+                       reg = <0x01c0b000 0x1000>;
+                       interrupts = <55>;
+                       clocks = <&ahb_gates 17>;
+                       status = "disabled";
+               };
+
+               mdio@01c0b080 {
+                       compatible = "allwinner,sun4i-mdio";
+                       reg = <0x01c0b080 0x14>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                intc: interrupt-controller@01c20400 {
                        compatible = "allwinner,sun4i-ic";
                        reg = <0x01c20400 0x400>;
                                allwinner,drive = <0>;
                                allwinner,pull = <0>;
                        };
+
+                       emac_pins_a: emac0@0 {
+                               allwinner,pins = "PA0", "PA1", "PA2",
+                                               "PA3", "PA4", "PA5", "PA6",
+                                               "PA7", "PA8", "PA9", "PA10",
+                                               "PA11", "PA12", "PA13", "PA14",
+                                               "PA15", "PA16";
+                               allwinner,function = "emac";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
                };
 
                timer@01c20c00 {
index 52b88d81b7bbaf435d7a9d7d4c85a9e5fe59f07b..3caed0db698614f45652476595ed54b42de54ffc 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 
-#include <linux/irqchip/arm-gic.h>
-
 #include <asm/mcpm.h>
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
 static void __cpuinit mcpm_secondary_init(unsigned int cpu)
 {
        mcpm_cpu_powered_up();
-       gic_secondary_init(0);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index e40b435d204e85f12d41cfc2bd5b43f84f99354e..227abf9cc6018fe2a53ed6146cff8a2d69ee0f97 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
@@ -7,17 +7,18 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
 CONFIG_ARCH_EXYNOS=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
+CONFIG_S3C_LOWLEVEL_UART_PORT=3
 CONFIG_S3C24XX_PWM=y
 CONFIG_ARCH_EXYNOS5=y
 CONFIG_MACH_EXYNOS4_DT=y
-CONFIG_MACH_EXYNOS5_DT=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
@@ -30,35 +31,58 @@ CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_RFKILL_REGULATOR=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
 CONFIG_NETDEVICES=y
 CONFIG_SMSC911X=y
 CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
+CONFIG_KEYBOARD_CROS_EC=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_CYAPA=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_HW_RANDOM=y
+CONFIG_TCG_TPM=y
+CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_S3C2410=y
+CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_MAX77686=y
+CONFIG_MFD_MAX8997=y
+CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_TPS65090=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_MAX8997=y
+CONFIG_REGULATOR_MAX77686=y
+CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
 CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
 CONFIG_EXYNOS_DP=y
@@ -67,6 +91,20 @@ CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
 CONFIG_LOGO=y
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_S5P=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_PHY=y
+CONFIG_SAMSUNG_USB2PHY=y
+CONFIG_SAMSUNG_USB3PHY=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_S3C=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
@@ -79,6 +117,7 @@ CONFIG_ROMFS_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
@@ -87,6 +126,5 @@ CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRC_CCITT=y
index 7e0ebb64a7f9dafe5c73b9201c39e344c309ef93..9940f7b4e438c258d125cc6567ac1b7a9b2fc429 100644 (file)
@@ -199,7 +199,6 @@ CONFIG_USB_PHY=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
index c1ef64bc5abd65781da53de52d4cfd3e392e29cc..769c0f039882c2d99661c41832b2738e82a1ccfd 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_MULTI_V6=y
 CONFIG_ARCH_OMAP2PLUS=y
 CONFIG_OMAP_RESET_CLOCKS=y
 CONFIG_OMAP_MUX_DEBUG=y
@@ -204,7 +205,6 @@ CONFIG_USB=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
@@ -284,3 +284,4 @@ CONFIG_SOC_OMAP5=y
 CONFIG_TI_DAVINCI_MDIO=y
 CONFIG_TI_DAVINCI_CPDMA=y
 CONFIG_TI_CPSW=y
+CONFIG_AT803X_PHY=y
index a5f0485133cf9d9fc608a742e158b629405e1c7d..f7ba316164d4e9e4572f396249e70ad65e9163a9 100644 (file)
@@ -153,6 +153,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=m
 CONFIG_DRM=y
+CONFIG_TEGRA_HOST1X=y
 CONFIG_DRM_TEGRA=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
@@ -202,7 +203,7 @@ CONFIG_TEGRA20_APB_DMA=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
 CONFIG_SENSORS_ISL29028=y
-CONFIG_SENSORS_AK8975=y
+CONFIG_AK8975=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
index 92c6eed7aac9cded01cd9bd259deba3bd6fda782..99207c45ec10f56891cd40d53b8a5fe524337f0f 100644 (file)
@@ -195,6 +195,7 @@ ENTRY(sha1_block_data_order)
        add     r3,r3,r10                       @ E+=F_00_19(B,C,D)
        cmp     r14,sp
        bne     .L_00_15                @ [((11+4)*5+2)*3]
+       sub     sp,sp,#25*4
 #if __ARM_ARCH__<7
        ldrb    r10,[r1,#2]
        ldrb    r9,[r1,#3]
@@ -290,7 +291,6 @@ ENTRY(sha1_block_data_order)
        add     r3,r3,r10                       @ E+=F_00_19(B,C,D)
 
        ldr     r8,.LK_20_39            @ [+15+16*4]
-       sub     sp,sp,#25*4
        cmn     sp,#0                   @ [+3], clear carry to denote 20_39
 .L_20_39_or_60_79:
        ldr     r9,[r14,#15*4]
index 7eb18c1d8d6cbce723b9f62c77746dad81aa11ec..4f009c10540dff2a2e7efd08b0671c2369547b90 100644 (file)
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),       \
                                                atomic64_t,             \
                                                counter),               \
-                                             (unsigned long)(o),       \
-                                             (unsigned long)(n)))
+                                             (unsigned long long)(o),  \
+                                             (unsigned long long)(n)))
 
 #define cmpxchg64_local(ptr, o, n)                                     \
        ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),        \
                                                local64_t,              \
                                                a),                     \
-                                            (unsigned long)(o),        \
-                                            (unsigned long)(n)))
+                                            (unsigned long long)(o),   \
+                                            (unsigned long long)(n)))
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
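
The casts above matter because unsigned long is 32 bits on ARM, so feeding a 64-bit old/new value through it silently drops the upper word before atomic64_cmpxchg()/local64_cmpxchg() ever see it. A minimal userspace sketch of that truncation (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t o = 0x100000001ULL;		/* a value with bits above 31 set */
	unsigned long t = (unsigned long)o;	/* 32-bit on ARM builds */

	/* On a 32-bit build this prints 0x1: the upper word is gone, so a
	 * cmpxchg64 built on unsigned long casts cannot compare the full
	 * 64-bit value.  Casting to unsigned long long keeps all 64 bits. */
	printf("%#lx\n", t);
	return 0;
}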
 
index 968c0a14e0a36b12565a0bf05f2393b61ed0a27f..209e6504922e1c0b5a63733572055af723e37119 100644 (file)
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
        unsigned long off;
-       /* Read TPIDRPRW */
-       asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+       register unsigned long *sp asm ("sp");
+
+       /*
+        * Read TPIDRPRW.
+        * We want to allow caching the value, so avoid using volatile and
+        * instead use a fake stack read to hazard against barrier().
+        */
+       asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
        return off;
 }
 #define __my_cpu_offset __my_cpu_offset()
index 99a19512ee26e2e5d99135d21f10d8b99e606226..bdf2b8458ec1d3bb0366ff6ce8aa6623e234aa02 100644 (file)
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)     0
-#else
-#define tlb_fast_mode(tlb)     1
-#endif
-
 #define MMU_GATHER_BUNDLE      8
 
 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
        tlb_flush(tlb);
-       if (!tlb_fast_mode(tlb)) {
-               free_pages_and_swap_cache(tlb->pages, tlb->nr);
-               tlb->nr = 0;
-               if (tlb->pages == tlb->local)
-                       __tlb_alloc_page(tlb);
-       }
+       free_pages_and_swap_cache(tlb->pages, tlb->nr);
+       tlb->nr = 0;
+       if (tlb->pages == tlb->local)
+               __tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-       if (tlb_fast_mode(tlb)) {
-               free_page_and_swap_cache(page);
-               return 1; /* avoid calling tlb_flush_mmu */
-       }
-
        tlb->pages[tlb->nr++] = page;
        VM_BUG_ON(tlb->nr > tlb->max);
        return tlb->max - tlb->nr;
index 2848857f5b62f91f7babe3ae928d932e252f723c..fbd24beeb1fad70886ea044387c873a65ef455e2 100644 (file)
@@ -24,9 +24,9 @@
 #define U8500_UART0_PHYS_BASE  (0x80120000)
 #define U8500_UART1_PHYS_BASE  (0x80121000)
 #define U8500_UART2_PHYS_BASE  (0x80007000)
-#define U8500_UART0_VIRT_BASE  (0xa8120000)
-#define U8500_UART1_VIRT_BASE  (0xa8121000)
-#define U8500_UART2_VIRT_BASE  (0xa8007000)
+#define U8500_UART0_VIRT_BASE  (0xf8120000)
+#define U8500_UART1_VIRT_BASE  (0xf8121000)
+#define U8500_UART2_VIRT_BASE  (0xf8007000)
 #define __UX500_PHYS_UART(n)   U8500_UART##n##_PHYS_BASE
 #define __UX500_VIRT_UART(n)   U8500_UART##n##_VIRT_BASE
 #endif
index f219703168366f8142321b93750e0027f1e6c6b2..282de4826abb640bd310ce8cf6099dec297803ce 100644 (file)
@@ -411,7 +411,6 @@ static struct vm_area_struct gate_vma = {
        .vm_start       = 0xffff0000,
        .vm_end         = 0xffff0000 + PAGE_SIZE,
        .vm_flags       = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-       .vm_mm          = &init_mm,
 };
 
 static int __init gate_vma_init(void)
index 47ab90563bf48b3febdef5fd4085cd2071d551f9..550d63cef68e4be6ada87a3c7942ccd767061c6d 100644 (file)
@@ -251,7 +251,7 @@ void __ref cpu_die(void)
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
-       RCU_NONIDLE(complete(&cpu_died));
+       complete(&cpu_died);
 
        /*
         * Ensure that the cache lines associated with that completion are
index f10316b4ecdc7558b05644d3758532a26fa6de58..c5a59546a256c9b3720022265eb08fcde0dc4f68 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/node.h>
@@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
  * cpu topology table
  */
 struct cputopo_arm cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
index 37d216d814cdd62d12040666e70192b0a33fe350..ef1703b9587b0264c5b7ea8ebb246376dfe76326 100644 (file)
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
        wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:      The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        int ret;
        sigset_t sigsaved;
 
-       /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-       if (unlikely(vcpu->arch.target < 0))
+       if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;
 
        ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
+
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       return -ENOEXEC;
+
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_reg_list reg_list;
                unsigned n;
 
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       return -ENOEXEC;
+
                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
index 965706578f13bdb0ff062f9df2502608de7a3ae9..84ba67b982c0d32546dae5092ec84ac1d620756b 100644 (file)
@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector;
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+       /*
+        * This function also gets called when dealing with HYP page
+        * tables. As HYP doesn't have an associated struct kvm (and
+        * the HYP page tables are fairly static), we don't do
+        * anything there.
+        */
+       if (kvm)
+               kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
        return p;
 }
 
-static void clear_pud_entry(pud_t *pud)
+static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
 }
 
-static void clear_pmd_entry(pmd_t *pmd)
+static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
 }
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
        return page_count(pmd_page) == 1;
 }
 
-static void clear_pte_entry(pte_t *pte)
+static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
        }
 }
 
@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
        return page_count(pte_page) == 1;
 }
 
-static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+                       unsigned long long start, u64 size)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
                }
 
                pte = pte_offset_kernel(pmd, addr);
-               clear_pte_entry(pte);
+               clear_pte_entry(kvm, pte, addr);
                range = PAGE_SIZE;
 
                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
-                       clear_pmd_entry(pmd);
+                       clear_pmd_entry(kvm, pmd, addr);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
-                               clear_pud_entry(pud);
+                               clear_pud_entry(kvm, pud, addr);
                                range = PUD_SIZE;
                        }
                }
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        if (boot_hyp_pgd) {
-               unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+               unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+               unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
                kfree(boot_hyp_pgd);
                boot_hyp_pgd = NULL;
        }
 
        if (hyp_pgd)
-               unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+               unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
        kfree(init_bounce_page);
        init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)
 
        if (hyp_pgd) {
                for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+                       unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+                       unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
                kfree(hyp_pgd);
                hyp_pgd = NULL;
        }
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
-       unmap_range(kvm->arch.pgd, start, size);
+       unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
 /**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-       kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
index 2acdff4c1dfea8c72c7106dff630a4870d6e70a9..180b3024bec3ab36cc2d7cdb62e92d7b2298d297 100644 (file)
@@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 static struct clock_event_device clkevt = {
        .name           = "at91_tick",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+       .shift          = 32,
        .rating         = 150,
        .set_next_event = clkevt32k_next_event,
        .set_mode       = clkevt32k_mode,
@@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void)
        at91_st_write(AT91_ST_RTMR, 1);
 
        /* Setup timer clockevent, with minimum of two ticks (important!!) */
+       clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
+       clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
+       clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
        clkevt.cpumask = cpumask_of(0);
-       clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
-                                       2, AT91_ST_ALMV);
+       clockevents_register_device(&clkevt);
 
        /* register clocksource */
        clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
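
Open-coding mult/shift here mirrors what clockevents_config_and_register() would derive. As a rough check of the numbers, with a 32768 Hz slow clock and shift = 32, div_sc() yields mult of about 140737 and the two-tick minimum lands near 61 µs; the userspace sketch below redoes that arithmetic and is not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Back-of-the-envelope version of what div_sc() and
	 * clockevent_delta2ns() compute for the values used above. */
	uint64_t hz = 32768, nsec_per_sec = 1000000000ULL;
	unsigned int shift = 32;
	uint64_t mult = (hz << shift) / nsec_per_sec;		/* ~140737 */
	uint64_t two_ticks_ns = (((uint64_t)2 << shift) + mult - 1) / mult;

	printf("mult=%llu, min_delta~=%lluns (+1 in the real code)\n",
	       (unsigned long long)mult, (unsigned long long)two_ticks_ns);
	return 0;
}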
index 13cdbcd48f51eca105add53d8a674a2ea0e5fa45..c7d670d118025eaf71a4dd145d0d3a33d300a415 100644 (file)
@@ -223,13 +223,7 @@ static void __init at91sam9n12_map_io(void)
        at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE);
 }
 
-void __init at91sam9n12_initialize(void)
-{
-       at91_extern_irq = (1 << AT91SAM9N12_ID_IRQ0);
-}
-
 AT91_SOC_START(at91sam9n12)
        .map_io = at91sam9n12_map_io,
        .register_clocks = at91sam9n12_register_clocks,
-       .init = at91sam9n12_initialize,
 AT91_SOC_END
index 31df12029c4e9a5b32695874e95fb9e4e4c75589..2bd7f51b0b8204d92c4bde747d197ad9dc21fa36 100644 (file)
@@ -179,9 +179,9 @@ extern void __iomem *at91_pmc_base;
 #define                AT91_PMC_PCR_CMD        (0x1  <<  12)           /* Command (read=0, write=1) */
 #define                AT91_PMC_PCR_DIV(n)     ((n)  <<  16)           /* Divisor Value */
 #define                        AT91_PMC_PCR_DIV0       0x0                     /* Peripheral clock is MCK */
-#define                        AT91_PMC_PCR_DIV2       0x2                     /* Peripheral clock is MCK/2 */
-#define                        AT91_PMC_PCR_DIV4       0x4                     /* Peripheral clock is MCK/4 */
-#define                        AT91_PMC_PCR_DIV8       0x8                     /* Peripheral clock is MCK/8 */
+#define                        AT91_PMC_PCR_DIV2       0x1                     /* Peripheral clock is MCK/2 */
+#define                        AT91_PMC_PCR_DIV4       0x2                     /* Peripheral clock is MCK/4 */
+#define                        AT91_PMC_PCR_DIV8       0x3                     /* Peripheral clock is MCK/8 */
 #define                AT91_PMC_PCR_EN         (0x1  <<  28)           /* Enable */
 
 #endif
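
The corrected DIV values read as an encoded field rather than the literal divisor: MCK/1 -> 0, MCK/2 -> 1, MCK/4 -> 2, MCK/8 -> 3, i.e. apparently log2 of the divisor. A small hedged helper showing that relationship (pcr_div_field() is an illustrative name, not an API from this diff):

#include <linux/log2.h>

/* Illustrative only: the corrected defines above suggest the PCR DIV field
 * encodes log2 of the MCK divisor (1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3). */
static inline unsigned int pcr_div_field(unsigned int divisor)
{
	return ilog2(divisor);
}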
index d19edff0ea6e07bbddefd7dd1d7a94d0cc171ff1..ff18fc2ea46f092bc68786065bcdd985da325c7e 100644 (file)
@@ -250,6 +250,7 @@ config MACH_ARMLEX4210
 config MACH_UNIVERSAL_C210
        bool "Mobile UNIVERSAL_C210 Board"
        select CLKSRC_MMIO
+       select CLKSRC_SAMSUNG_PWM
        select CPU_EXYNOS4210
        select EXYNOS4_SETUP_FIMC
        select EXYNOS4_SETUP_FIMD0
@@ -281,7 +282,6 @@ config MACH_UNIVERSAL_C210
        select S5P_DEV_TV
        select S5P_GPIO_INT
        select S5P_SETUP_MIPIPHY
-       select SAMSUNG_HRT
        help
          Machine support for Samsung Mobile Universal S5PC210 Reference
          Board.
@@ -410,6 +410,7 @@ config MACH_EXYNOS4_DT
        depends on ARCH_EXYNOS4
        select ARM_AMBA
        select CLKSRC_OF
+       select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
        select CPU_EXYNOS4210
        select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
        select PINCTRL
index 745e304ad0ded17ab6b2f9d9253592739afbc1df..f7e504b7874d05e01ff13f4dce35d98c9cbec80a 100644 (file)
  */
 
 #include <linux/kernel.h>
+#include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/gpio.h>
+#include <clocksource/samsung_pwm.h>
 #include <linux/sched.h>
 #include <linux/serial_core.h>
 #include <linux/of.h>
@@ -302,6 +304,13 @@ static struct map_desc exynos5440_iodesc0[] __initdata = {
        },
 };
 
+static struct samsung_pwm_variant exynos4_pwm_variant = {
+       .bits           = 32,
+       .div_base       = 0,
+       .has_tint_cstat = true,
+       .tclk_mask      = 0,
+};
+
 void exynos4_restart(char mode, const char *cmd)
 {
        __raw_writel(0x1, S5P_SWRESET);
@@ -317,9 +326,16 @@ void exynos5_restart(char mode, const char *cmd)
                val = 0x1;
                addr = EXYNOS_SWRESET;
        } else if (of_machine_is_compatible("samsung,exynos5440")) {
+               u32 status;
                np = of_find_compatible_node(NULL, NULL, "samsung,exynos5440-clock");
+
+               addr = of_iomap(np, 0) + 0xbc;
+               status = __raw_readl(addr);
+
                addr = of_iomap(np, 0) + 0xcc;
-               val = (0xfff << 20) | (0x1 << 16);
+               val = __raw_readl(addr);
+
+               val = (val & 0xffff0000) | (status & 0xffff);
        } else {
                pr_err("%s: cannot support non-DT\n", __func__);
                return;
@@ -370,6 +386,8 @@ int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
 
 void __init exynos_init_io(struct map_desc *mach_desc, int size)
 {
+       debug_ll_io_init();
+
 #ifdef CONFIG_OF
        if (initial_boot_params)
                of_scan_flat_dt(exynos_fdt_map_chipid, NULL);
@@ -442,8 +460,20 @@ static void __init exynos5440_map_io(void)
        iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
 }
 
+void __init exynos_set_timer_source(u8 channels)
+{
+       exynos4_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+       exynos4_pwm_variant.output_mask &= ~channels;
+}
+
 void __init exynos_init_time(void)
 {
+       unsigned int timer_irqs[SAMSUNG_PWM_NUM] = {
+               EXYNOS4_IRQ_TIMER0_VIC, EXYNOS4_IRQ_TIMER1_VIC,
+               EXYNOS4_IRQ_TIMER2_VIC, EXYNOS4_IRQ_TIMER3_VIC,
+               EXYNOS4_IRQ_TIMER4_VIC,
+       };
+
        if (of_have_populated_dt()) {
 #ifdef CONFIG_OF
                of_clk_init(NULL);
@@ -455,7 +485,14 @@ void __init exynos_init_time(void)
                exynos4_clk_init(NULL, !soc_is_exynos4210(), S5P_VA_CMU, readl(S5P_VA_CHIPID + 8) & 1);
                exynos4_clk_register_fixed_ext(xxti_f, xusbxti_f);
 #endif
-               mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0, EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1);
+#ifdef CONFIG_CLKSRC_SAMSUNG_PWM
+               if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
+                       samsung_pwm_clocksource_init(S3C_VA_TIMER,
+                                       timer_irqs, &exynos4_pwm_variant);
+               else
+#endif
+                       mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0,
+                                       EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1);
        }
 }
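
exynos_set_timer_source() marks the PWM channels claimed by the clocksource and leaves the rest as usable outputs. Assuming SAMSUNG_PWM_NUM is 5, the Universal C210 call later in this diff, exynos_set_timer_source(BIT(2) | BIT(4)), leaves output_mask = 0x0b; the standalone sketch below only reproduces that mask arithmetic.

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define SAMSUNG_PWM_NUM		5	/* assumed value for this sketch */

int main(void)
{
	/* Mirrors exynos_set_timer_source(BIT(2) | BIT(4)) from the
	 * Universal C210 hunk later in this diff. */
	unsigned int channels = BIT(2) | BIT(4);
	unsigned int output_mask = (BIT(SAMSUNG_PWM_NUM) - 1) & ~channels;

	printf("output_mask = %#x\n", output_mask);	/* 0x0b */
	return 0;
}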
 
index 60dd35cc01a608102ab629a748ea1d19ca8fc2a6..11fc1e29819bb58b2fc4271b78a8ba3e3a8b9776 100644 (file)
@@ -32,6 +32,8 @@ void exynos4_clk_register_fixed_ext(unsigned long, unsigned long);
 
 void exynos_firmware_init(void);
 
+void exynos_set_timer_source(u8 channels);
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS
 int exynos_pm_late_initcall(void);
 #else
index 7dbbfec13ea5b786c57f0ae989ca4fde6ad54f1a..296090e7f423cc72952ccfefd6a4a60be25a044f 100644 (file)
 #ifndef __ASM_ARCH_PM_CORE_H
 #define __ASM_ARCH_PM_CORE_H __FILE__
 
+#include <linux/of.h>
 #include <mach/regs-pmu.h>
 
+#ifdef CONFIG_PINCTRL_EXYNOS
+extern u32 exynos_get_eint_wake_mask(void);
+#else
+static inline u32 exynos_get_eint_wake_mask(void) { return 0xffffffff; }
+#endif
+
 static inline void s3c_pm_debug_init_uart(void)
 {
        /* nothing here yet */
@@ -27,7 +34,12 @@ static inline void s3c_pm_debug_init_uart(void)
 
 static inline void s3c_pm_arch_prepare_irqs(void)
 {
-       __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+       u32 eintmask = s3c_irqwake_eintmask;
+
+       if (of_have_populated_dt())
+               eintmask = exynos_get_eint_wake_mask();
+
+       __raw_writel(eintmask, S5P_EINT_WAKEUP_MASK);
        __raw_writel(s3c_irqwake_intmask & ~(1 << 31), S5P_WAKEUP_MASK);
 }
 
index 327d50d4681d7e97dc7efa720ce39bccc11ead7d..74ddb2b55614234b69ee26d69ff51f5ce8cb3540 100644 (file)
@@ -41,7 +41,6 @@
 #include <plat/mfc.h>
 #include <plat/sdhci.h>
 #include <plat/fimc-core.h>
-#include <plat/samsung-time.h>
 #include <plat/camport.h>
 
 #include <mach/map.h>
@@ -1094,7 +1093,7 @@ static void __init universal_map_io(void)
 {
        exynos_init_io(NULL, 0);
        s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
-       samsung_set_timer_source(SAMSUNG_PWM2, SAMSUNG_PWM4);
+       exynos_set_timer_source(BIT(2) | BIT(4));
        xxti_f = 0;
        xusbxti_f = 24000000;
 }
@@ -1154,7 +1153,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
        .map_io         = universal_map_io,
        .init_machine   = universal_machine_init,
        .init_late      = exynos_init_late,
-       .init_time      = samsung_timer_init,
+       .init_time      = exynos_init_time,
        .reserve        = &universal_reserve,
        .restart        = exynos4_restart,
 MACHINE_END
index ba44328464f37c0cdce9eb9039fcfce8ad8dc743..af8e109953d1214fc6413f7614eaa2aa13ecfdde 100644 (file)
@@ -111,7 +111,6 @@ config SOC_IMX25
        select ARCH_MXC_IOMUX_V3
        select COMMON_CLK
        select CPU_ARM926T
-       select HAVE_CAN_FLEXCAN if CAN
        select MXC_AVIC
 
 config SOC_IMX27
@@ -137,7 +136,6 @@ config SOC_IMX35
        select ARCH_MXC_IOMUX_V3
        select COMMON_CLK
        select CPU_V6K
-       select HAVE_CAN_FLEXCAN if CAN
        select HAVE_EPIT
        select MXC_AVIC
        select SMP_ON_UP if SMP
@@ -776,7 +774,6 @@ comment "Device tree only"
 
 config SOC_IMX53
        bool "i.MX53 support"
-       select HAVE_CAN_FLEXCAN if CAN
        select HAVE_IMX_SRC
        select IMX_HAVE_PLATFORM_IMX2_WDT
        select PINCTRL
@@ -799,7 +796,6 @@ config SOC_IMX6Q
        select CPU_V7
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if LOCAL_TIMERS
-       select HAVE_CAN_FLEXCAN if CAN
        select HAVE_IMX_ANATOP
        select HAVE_IMX_GPC
        select HAVE_IMX_MMDC
index 151259003086e8d1f0db34015742ab307b3f986a..4e3148ce852dfe20faa2a820c19b083746ff8257 100644 (file)
@@ -177,17 +177,18 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
 static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
 static const char *pll1_sw_sels[]      = { "pll1_sys", "step", };
 static const char *periph_pre_sels[]   = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
-static const char *periph_clk2_sels[]  = { "pll3_usb_otg", "osc", };
+static const char *periph_clk2_sels[]  = { "pll3_usb_otg", "osc", "osc", "dummy", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
 static const char *periph_sels[]       = { "periph_pre", "periph_clk2", };
 static const char *periph2_sels[]      = { "periph2_pre", "periph2_clk2", };
-static const char *axi_sels[]          = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[]          = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
 static const char *audio_sels[]        = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
 static const char *gpu_axi_sels[]      = { "axi", "ahb", };
 static const char *gpu2d_core_sels[]   = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
 static const char *gpu3d_core_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
-static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
 static const char *ipu_sels[]          = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[]       = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+static const char *ldb_di_sels[]       = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
 static const char *ipu_di_pre_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
 static const char *ipu1_di0_sels[]     = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
 static const char *ipu1_di1_sels[]     = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -369,8 +370,8 @@ int __init mx6q_clocks_init(void)
        clk[pll1_sw]          = imx_clk_mux("pll1_sw",          base + 0xc,  2,  1, pll1_sw_sels,      ARRAY_SIZE(pll1_sw_sels));
        clk[periph_pre]       = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
        clk[periph2_pre]      = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
-       clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
-       clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+       clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 2, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+       clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
        clk[axi_sel]          = imx_clk_mux("axi_sel",          base + 0x14, 6,  2, axi_sels,          ARRAY_SIZE(axi_sels));
        clk[esai_sel]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
        clk[asrc_sel]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
@@ -498,7 +499,7 @@ int __init mx6q_clocks_init(void)
        clk[ldb_di1]      = imx_clk_gate2("ldb_di1",       "ldb_di1_podf",      base + 0x74, 14);
        clk[ipu2_di1]     = imx_clk_gate2("ipu2_di1",      "ipu2_di1_sel",      base + 0x74, 10);
        clk[hsi_tx]       = imx_clk_gate2("hsi_tx",        "hsi_tx_podf",       base + 0x74, 16);
-       clk[mlb]          = imx_clk_gate2("mlb",           "pll8_mlb",          base + 0x74, 18);
+       clk[mlb]          = imx_clk_gate2("mlb",           "axi",               base + 0x74, 18);
        clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi",  "mmdc_ch0_axi_podf", base + 0x74, 20);
        clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi",  "mmdc_ch1_axi_podf", base + 0x74, 22);
        clk[ocram]        = imx_clk_gate2("ocram",         "ahb",               base + 0x74, 28);
index 3dd2b1b041d15dc8d4eaf7e8880644c435660888..68c74fb0373c63a1f47b97630cf90f70b2e59ee1 100644 (file)
@@ -4,7 +4,6 @@ config IMX_HAVE_PLATFORM_FEC
 
 config IMX_HAVE_PLATFORM_FLEXCAN
        bool
-       select HAVE_CAN_FLEXCAN if CAN
 
 config IMX_HAVE_PLATFORM_FSL_USB2_UDC
        bool
index 67b9c48dcafe7dc1ee281c49ece9de88338297f4..627f16f0e9d1d393d48527e50f25c5e22c81987d 100644 (file)
        .section ".text.head", "ax"
 
 #ifdef CONFIG_SMP
+diag_reg_offset:
+       .word   g_diag_reg - .
+
+       .macro  set_diag_reg
+       adr     r0, diag_reg_offset
+       ldr     r1, [r0]
+       add     r1, r1, r0              @ r1 = physical &g_diag_reg
+       ldr     r0, [r1]
+       mcr     p15, 0, r0, c15, c0, 1  @ write diagnostic register
+       .endm
+
 ENTRY(v7_secondary_startup)
        bl      v7_invalidate_l1
+       set_diag_reg
        b       secondary_startup
 ENDPROC(v7_secondary_startup)
 #endif
index 4a69305db65e395366187a84cde322e37fc99e94..c6e1ab5448822c8aaed044413f726b0dc8c47a39 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/init.h>
 #include <linux/smp.h>
+#include <asm/cacheflush.h>
 #include <asm/page.h>
 #include <asm/smp_scu.h>
 #include <asm/mach/map.h>
@@ -21,6 +22,7 @@
 
 #define SCU_STANDBY_ENABLE     (1 << 5)
 
+u32 g_diag_reg;
 static void __iomem *scu_base;
 
 static struct map_desc scu_io_desc __initdata = {
@@ -80,6 +82,18 @@ void imx_smp_prepare(void)
 static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
 {
        imx_smp_prepare();
+
+       /*
+        * The diagnostic register holds the errata bits.  The bootloader
+        * usually does not bring up the secondary cores, so any errata bits
+        * it sets are applied only to the boot cpu.  On an SMP configuration
+        * they must be applied to every single core, so read the register
+        * from the boot cpu here; it will be replicated into the secondary
+        * cores when they are booted.
+        */
+       asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
+       __cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg));
+       outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1));
 }
 
 struct smp_operations  imx_smp_ops __initdata = {
index acb0187c7ee1d036d4c119574e46fab6dbbef48d..4695d5f35fc937dc6eab658fc979d5ee2ecef721 100644 (file)
@@ -41,13 +41,3 @@ void __init qnap_dt_ts219_init(void)
 
        pm_power_off = qnap_tsx1x_power_off;
 }
-
-/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
-static int __init ts219_pci_init(void)
-{
-       if (machine_is_ts219())
-               kirkwood_pcie_init(KW_PCIE0);
-
-       return 0;
-}
-subsys_initcall(ts219_pci_init);
index c2cae69e6d2bb9343798a7a3398ce174850d59cf..f389228975637cd85d3f9f9444ae30e0f34537cc 100644 (file)
@@ -528,12 +528,6 @@ void __init kirkwood_init_early(void)
 {
        orion_time_set_base(TIMER_VIRT_BASE);
 
-       /*
-        * Some Kirkwood devices allocate their coherent buffers from atomic
-        * context. Increase size of atomic coherent pool to make sure such
-        * the allocations won't fail.
-        */
-       init_dma_coherent_pool_size(SZ_1M);
        mvebu_mbus_init("marvell,kirkwood-mbus",
                        BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
                        DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
index 827cde42414f3d49e8f03cda318ec937f2bf90c0..e96fd71abd766b113c1479a272b1da0db2fea436 100644 (file)
@@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)
 
        kirkwood_pcie_id(&dev, &rev);
 
-       if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
-           (dev == MV88F6282_DEV_ID))
+       if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
                return MPP_F6281_MASK;
+       if (dev == MV88F6282_DEV_ID)
+               return MPP_F6282_MASK;
        if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
                return MPP_F6192_MASK;
        if (dev == MV88F6180_DEV_ID)
index 283abff902287357a27da54427a390432317bca2..e1267d6b468f1f6d0d0a0e7efa40391bff077183 100644 (file)
@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
 static int __init ts219_pci_init(void)
 {
        if (machine_is_ts219())
-               kirkwood_pcie_init(KW_PCIE0);
+               kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
 
        return 0;
 }
index e11acbb0a46d4316c82f23cb333744cbab41d89f..80a8bcacd9d539c0a684e3beaa927b07dd720566 100644 (file)
@@ -15,6 +15,7 @@ config ARCH_MVEBU
        select MVEBU_CLK_GATING
        select MVEBU_MBUS
        select ZONE_DMA if ARM_LPAE
+       select ARCH_REQUIRE_GPIOLIB
 
 if ARCH_MVEBU
 
index 42a4cb3087e23ab04ea2e7e22971c1f38aeb9aa2..1c48890bb72b2c7ad1301cdc9f8bad007362bf57 100644 (file)
@@ -53,13 +53,6 @@ void __init armada_370_xp_init_early(void)
 {
        char *mbus_soc_name;
 
-       /*
-        * Some Armada 370/XP devices allocate their coherent buffers
-        * from atomic context. Increase size of atomic coherent pool
-        * to make sure such the allocations won't fail.
-        */
-       init_dma_coherent_pool_size(SZ_1M);
-
        /*
         * This initialization will be replaced by a DT-based
         * initialization once the mvebu-mbus driver gains DT support.
index 53e8391192cd25d7495cd4e6b6602552182a1dc2..5476669ba9056ff80d63fab31d8f266ef25a2652 100644 (file)
@@ -32,15 +32,21 @@ ENTRY(ll_set_cpu_coherent)
 
        /* Add CPU to SMP group - Atomic */
        add     r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
-       ldr     r2, [r3]
+1:
+       ldrex   r2, [r3]
        orr     r2, r2, r1
-       str     r2, [r3]
+       strex   r0, r2, [r3]
+       cmp     r0, #0
+       bne 1b
 
        /* Enable coherency on CPU - Atomic */
-       add     r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET
-       ldr     r2, [r3]
+       add     r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET
+1:
+       ldrex   r2, [r3]
        orr     r2, r2, r1
-       str     r2, [r3]
+       strex   r0, r2, [r3]
+       cmp     r0, #0
+       bne 1b
 
        dsb
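
The ldrex/strex loops above close a window where two CPUs doing a plain load/or/store on the coherency fabric registers could overwrite each other's bits. A hedged C analogue of the same retry pattern, on an ordinary atomic variable rather than the real MMIO register:

#include <stdatomic.h>
#include <stdint.h>

/* C analogue of the ldrex/strex retry loop above: set bits in a shared
 * word without losing a concurrent update.  Purely illustrative; the real
 * code operates on an MMIO coherency-fabric register in assembly. */
static void set_bits_atomically(_Atomic uint32_t *word, uint32_t bits)
{
	uint32_t old = atomic_load_explicit(word, memory_order_relaxed);

	/* A failed strex corresponds to a failed compare-exchange here:
	 * reload and retry until the update lands unmodified. */
	while (!atomic_compare_exchange_weak(word, &old, old | bits))
		;
}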
 
index 4dc2fbba0ecd1bc511e3369a9916a263420b02f9..ce6e7d606a7ce9cc1867e0e83263e93610e8445d 100644 (file)
@@ -11,7 +11,6 @@ config SOC_IMX28
        select ARM_AMBA
        select ARM_CPU_SUSPEND if PM
        select CPU_ARM926T
-       select HAVE_CAN_FLEXCAN if CAN
        select HAVE_PWM
        select PINCTRL_IMX28
 
index 68ab858e27b754bea93264889ab9b020cf67d698..a94b3a718d1a771dda53f5e06542fb6a6989cee8 100644 (file)
@@ -345,6 +345,7 @@ static int __init omap1_system_dma_init(void)
                dev_err(&pdev->dev,
                        "%s: Memory allocation failed for d->chan!\n",
                        __func__);
+               ret = -ENOMEM;
                goto exit_release_d;
        }
 
index 6ebc7803bc3e37b48ff9c1b394a39e2a3dff9a28..af3544ce4f0273b8eb8b43c22295a5ae6ced76a3 100644 (file)
@@ -454,9 +454,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
  */
 DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
 
-DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0,
-               AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
-               0x0, NULL);
+static struct clk clkdiv32k_ick;
+
+static const char *clkdiv32k_ick_parent_names[] = {
+       "clkdiv32k_ck",
+};
+
+static const struct clk_ops clkdiv32k_ick_ops = {
+       .enable         = &omap2_dflt_clk_enable,
+       .disable        = &omap2_dflt_clk_disable,
+       .is_enabled     = &omap2_dflt_clk_is_enabled,
+       .init           = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap clkdiv32k_ick_hw = {
+       .hw     = {
+               .clk    = &clkdiv32k_ick,
+       },
+       .enable_reg     = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
+       .enable_bit     = AM33XX_MODULEMODE_SWCTRL_SHIFT,
+       .clkdm_name     = "clk_24mhz_clkdm",
+};
+
+DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);
 
 /* "usbotg_fck" is an additional clock and not really a modulemode */
 DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
index 8f3bf4e509082fad0dcb412a0a21573b68e4a485..bbd6a3f717e64e5b23cce67591f374d4dcb7b2f3 100644 (file)
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>
 
 #include "clock.h"
 #include "clock36xx.h"
-
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
 /**
  * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
  */
 int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
 {
-       struct clk_hw_omap *parent;
+       struct clk_divider *parent;
        struct clk_hw *parent_hw;
-       u32 dummy_v, orig_v, clksel_shift;
+       u32 dummy_v, orig_v;
        int ret;
 
        /* Clear PWRDN bit of HSDIVIDER */
        ret = omap2_dflt_clk_enable(clk);
 
        parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
-       parent = to_clk_hw_omap(parent_hw);
+       parent = to_clk_divider(parent_hw);
 
        /* Restore the dividers */
        if (!ret) {
-               clksel_shift = __ffs(parent->clksel_mask);
-               orig_v = __raw_readl(parent->clksel_reg);
+               orig_v = __raw_readl(parent->reg);
                dummy_v = orig_v;
 
                /* Write any other value different from the Read value */
-               dummy_v ^= (1 << clksel_shift);
-               __raw_writel(dummy_v, parent->clksel_reg);
+               dummy_v ^= (1 << parent->shift);
+               __raw_writel(dummy_v, parent->reg);
 
                /* Write the original divider */
-               __raw_writel(orig_v, parent->clksel_reg);
+               __raw_writel(orig_v, parent->reg);
        }
 
        return ret;
index d25a95fe99216c582f97b0a0aee860c3573eaf68..7341eff63f56df8f58d5b72d8db9a71feb86112d 100644 (file)
@@ -1356,13 +1356,27 @@ static void _enable_sysc(struct omap_hwmod *oh)
 
        clkdm = _get_clkdm(oh);
        if (sf & SYSC_HAS_SIDLEMODE) {
+               if (oh->flags & HWMOD_SWSUP_SIDLE ||
+                   oh->flags & HWMOD_SWSUP_SIDLE_ACT) {
+                       idlemode = HWMOD_IDLEMODE_NO;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
+
+               /*
+                * This is special handling for some IP blocks, like the
+                * 32k sync timer: force them into idle.
+                */
                clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
                if (clkdm_act && !(oh->class->sysc->idlemodes &
                                   (SIDLE_SMART | SIDLE_SMART_WKUP)))
                        idlemode = HWMOD_IDLEMODE_FORCE;
-               else
-                       idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                               HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1391,10 +1405,6 @@ static void _enable_sysc(struct omap_hwmod *oh)
            (sf & SYSC_HAS_CLOCKACTIVITY))
                _set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-       /* If slave is in SMARTIDLE, also enable wakeup */
-       if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-               _enable_wakeup(oh, &v);
-
        _write_sysconfig(v, oh);
 
        /*
@@ -1430,13 +1440,16 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
-               if (oh->flags & HWMOD_SWSUP_SIDLE ||
-                   !(oh->class->sysc->idlemodes &
-                     (SIDLE_SMART | SIDLE_SMART_WKUP)))
+               if (oh->flags & HWMOD_SWSUP_SIDLE) {
                        idlemode = HWMOD_IDLEMODE_FORCE;
-               else
-                       idlemode = HWMOD_IDLEMODE_SMART;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1455,10 +1468,6 @@ static void _idle_sysc(struct omap_hwmod *oh)
                _set_master_standbymode(oh, idlemode, &v);
        }
 
-       /* If slave is in SMARTIDLE, also enable wakeup */
-       if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-               _enable_wakeup(oh, &v);
-
        _write_sysconfig(v, oh);
 }
 
@@ -2065,7 +2074,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
  * do so is present in the hwmod data, then call it and pass along the
  * return value; otherwise, return 0.
  */
-static int __init _enable_preprogram(struct omap_hwmod *oh)
+static int _enable_preprogram(struct omap_hwmod *oh)
 {
        if (!oh->class->enable_preprogram)
                return 0;
@@ -2245,42 +2254,6 @@ static int _idle(struct omap_hwmod *oh)
        return 0;
 }
 
-/**
- * omap_hwmod_set_ocp_autoidle - set the hwmod's OCP autoidle bit
- * @oh: struct omap_hwmod *
- * @autoidle: desired AUTOIDLE bitfield value (0 or 1)
- *
- * Sets the IP block's OCP autoidle bit in hardware, and updates our
- * local copy. Intended to be used by drivers that require
- * direct manipulation of the AUTOIDLE bits.
- * Returns -EINVAL if @oh is null or is not in the ENABLED state, or passes
- * along the return value from _set_module_autoidle().
- *
- * Any users of this function should be scrutinized carefully.
- */
-int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle)
-{
-       u32 v;
-       int retval = 0;
-       unsigned long flags;
-
-       if (!oh || oh->_state != _HWMOD_STATE_ENABLED)
-               return -EINVAL;
-
-       spin_lock_irqsave(&oh->_lock, flags);
-
-       v = oh->_sysc_cache;
-
-       retval = _set_module_autoidle(oh, autoidle, &v);
-
-       if (!retval)
-               _write_sysconfig(v, oh);
-
-       spin_unlock_irqrestore(&oh->_lock, flags);
-
-       return retval;
-}
-
 /**
  * _shutdown - shutdown an omap_hwmod
  * @oh: struct omap_hwmod *
@@ -3179,38 +3152,6 @@ error:
        return ret;
 }
 
-/**
- * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode
- * @oh: struct omap_hwmod *
- * @idlemode: SIDLEMODE field bits (shifted to bit 0)
- *
- * Sets the IP block's OCP slave idlemode in hardware, and updates our
- * local copy.  Intended to be used by drivers that have some erratum
- * that requires direct manipulation of the SIDLEMODE bits.  Returns
- * -EINVAL if @oh is null, or passes along the return value from
- * _set_slave_idlemode().
- *
- * XXX Does this function have any current users?  If not, we should
- * remove it; it is better to let the rest of the hwmod code handle this.
- * Any users of this function should be scrutinized carefully.
- */
-int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode)
-{
-       u32 v;
-       int retval = 0;
-
-       if (!oh)
-               return -EINVAL;
-
-       v = oh->_sysc_cache;
-
-       retval = _set_slave_idlemode(oh, idlemode, &v);
-       if (!retval)
-               _write_sysconfig(v, oh);
-
-       return retval;
-}
-
 /**
  * omap_hwmod_lookup - look up a registered omap_hwmod by name
  * @name: name of the omap_hwmod to look up
index fe5962921f07244e602429b758f0d5ccecb7398c..0c898f58ac9bb490e8873a24074e53c1480530a6 100644 (file)
@@ -463,6 +463,9 @@ struct omap_hwmod_omap4_prcm {
  *     is kept in force-standby mode. Failing to do so causes PM problems
  *     with musb on OMAP3630 at least. Note that musb has a dedicated register
  *     to control MSTANDBY signal when MIDLEMODE is set to force-standby.
+ * HWMOD_SWSUP_SIDLE_ACT: omap_hwmod code should manually bring the module
+ *     out of idle, but rely on smart-idle to put it back into idle,
+ *     so the wakeups are still functional (the only known case for now is UART)
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -476,6 +479,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_EXT_OPT_MAIN_CLK                 (1 << 9)
 #define HWMOD_BLOCK_WFI                                (1 << 10)
 #define HWMOD_FORCE_MSTANDBY                   (1 << 11)
+#define HWMOD_SWSUP_SIDLE_ACT                  (1 << 12)
 
 /*
  * omap_hwmod._int_flags definitions
@@ -641,9 +645,6 @@ int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name);
 int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
 int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
 
-int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode);
-int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle);
-
 int omap_hwmod_reset(struct omap_hwmod *oh);
 void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
 
index c8c64b3e1acc009efa24b91da269034b8603e714..d05fc7b54567f0dbb6597a1d641ce130caf7ee9b 100644 (file)
@@ -512,6 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
        .mpu_irqs       = omap2_uart1_mpu_irqs,
        .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -531,6 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
        .mpu_irqs       = omap2_uart2_mpu_irqs,
        .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -550,6 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
        .mpu_irqs       = omap2_uart3_mpu_irqs,
        .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
index 01d8f324450a951f89174b4cebdf82f8b9dad6b1..69337af748cc5ddb6c58c11f35b12708f3e432ab 100644 (file)
@@ -1995,6 +1995,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
        .name           = "uart1",
        .class          = &uart_class,
        .clkdm_name     = "l4_wkup_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart1_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_wkupdm_ck",
@@ -2006,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
        },
 };
 
+/* uart2 */
+static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
+       { .name = "tx", .dma_req = 28, },
+       { .name = "rx", .dma_req = 29, },
+       { .dma_req = -1 }
+};
+
 static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
        { .irq = 73 + OMAP_INTC_START, },
        { .irq = -1 },
@@ -2015,8 +2023,9 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
        .name           = "uart2",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart2_irqs,
-       .sdma_reqs      = uart1_edma_reqs,
+       .sdma_reqs      = uart2_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
        .prcm           = {
                .omap4  = {
@@ -2042,6 +2051,7 @@ static struct omap_hwmod am33xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart3_irqs,
        .sdma_reqs      = uart3_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2062,6 +2072,7 @@ static struct omap_hwmod am33xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart4_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2082,6 +2093,7 @@ static struct omap_hwmod am33xx_uart5_hwmod = {
        .name           = "uart5",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart5_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2102,6 +2114,7 @@ static struct omap_hwmod am33xx_uart6_hwmod = {
        .name           = "uart6",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart6_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
index 4083606ea1da15e7efeb279d7c33c3f3cc7618b3..31c7126eb3bb65d724cfc71ed0c0733a4202e4d6 100644 (file)
@@ -490,6 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
        .mpu_irqs       = omap2_uart1_mpu_irqs,
        .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -508,6 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
        .mpu_irqs       = omap2_uart2_mpu_irqs,
        .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -526,6 +528,7 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
        .mpu_irqs       = omap2_uart3_mpu_irqs,
        .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = OMAP3430_PER_MOD,
@@ -555,6 +558,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
        .mpu_irqs       = uart4_mpu_irqs,
        .sdma_reqs      = uart4_sdma_reqs,
        .main_clk       = "uart4_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = OMAP3430_PER_MOD,
index eaba9dc91a0d9824a530cf185537a28a881b5a24..848b6dc67590716f7663151f46a5809f097e9cf1 100644 (file)
@@ -3434,6 +3434,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
        .name           = "uart1",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart1_irqs,
        .sdma_reqs      = omap44xx_uart1_sdma_reqs,
        .main_clk       = "func_48m_fclk",
@@ -3462,6 +3463,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
        .name           = "uart2",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart2_irqs,
        .sdma_reqs      = omap44xx_uart2_sdma_reqs,
        .main_clk       = "func_48m_fclk",
@@ -3490,7 +3492,8 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
-       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
+                               HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart3_irqs,
        .sdma_reqs      = omap44xx_uart3_sdma_reqs,
        .main_clk       = "func_48m_fclk",
@@ -3519,6 +3522,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart4_irqs,
        .sdma_reqs      = omap44xx_uart4_sdma_reqs,
        .main_clk       = "func_48m_fclk",
index c01859398b5448cd76ddb622791c2d412907ae76..5a2d8034c8def0462fc9464c0b765ba3d62f9efb 100644 (file)
@@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void)
        /* Clear any pending PRCM interrupts */
        omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-       if (omap3_has_iva())
-               omap3_iva_idle();
+       /*
+        * We need to idle iva2_pwrdm even on am3703 with no iva2.
+        */
+       omap3_iva_idle();
 
        omap3_d2d_idle();
 }
index 8396b5b7e912d91e48adbcc3b785041845b9f677..f6601563aa6903f93e04f9656a58593907f89b0e 100644 (file)
@@ -95,38 +95,9 @@ static void omap_uart_enable_wakeup(struct device *dev, bool enable)
                omap_hwmod_disable_wakeup(od->hwmods[0]);
 }
 
-/*
- * Errata i291: [UART]:Cannot Acknowledge Idle Requests
- * in Smartidle Mode When Configured for DMA Operations.
- * WA: configure uart in force idle mode.
- */
-static void omap_uart_set_noidle(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct omap_device *od = to_omap_device(pdev);
-
-       omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
-}
-
-static void omap_uart_set_smartidle(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct omap_device *od = to_omap_device(pdev);
-       u8 idlemode;
-
-       if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP)
-               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
-       else
-               idlemode = HWMOD_IDLEMODE_SMART;
-
-       omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode);
-}
-
 #else
 static void omap_uart_enable_wakeup(struct device *dev, bool enable)
 {}
-static void omap_uart_set_noidle(struct device *dev) {}
-static void omap_uart_set_smartidle(struct device *dev) {}
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
@@ -299,8 +270,6 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
        omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
        omap_up.flags = UPF_BOOT_AUTOCONF;
        omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
-       omap_up.set_forceidle = omap_uart_set_smartidle;
-       omap_up.set_noidle = omap_uart_set_noidle;
        omap_up.enable_wakeup = omap_uart_enable_wakeup;
        omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
        omap_up.dma_rx_timeout = info->dma_rx_timeout;
index b97fd672e89d74f40e3ebfc0b1e18c365be785f3..f8a6db9239bf3c3ff7a88c29925cdce0961590cb 100644 (file)
@@ -199,13 +199,6 @@ void __init orion5x_init_early(void)
 
        orion_time_set_base(TIMER_VIRT_BASE);
 
-       /*
-        * Some Orion5x devices allocate their coherent buffers from atomic
-        * context. Increase size of atomic coherent pool to make sure such
-        * the allocations won't fail.
-        */
-       init_dma_coherent_pool_size(SZ_1M);
-
        /* Initialize the MBUS driver */
        orion5x_pcie_id(&dev, &rev);
        if (dev == MV88F5281_DEV_ID)
index 9936c180bf016bbf769e60a16309c603c69c1a6f..8f595c0cc8d9390124a2378b04e62f8b95307e34 100644 (file)
@@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void)
        struct device_node *np;
 
        np = of_find_matching_node(NULL, pwrc_ids);
-       if (!np)
-               panic("unable to find compatible pwrc node in dtb\n");
+       if (!np) {
+               pr_err("unable to find compatible sirf pwrc node in dtb\n");
+               return -ENOENT;
+       }
 
        /*
         * pwrc behind rtciobrg is not located in memory space
index 435019ca0a4893f231b9f79c6cba8eb61e0f17e9..d5e0cbc934c0c0f05db1480c350108963d7bcfa9 100644 (file)
@@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void)
        struct device_node *np;
 
        np = of_find_matching_node(NULL, rstc_ids);
-       if (!np)
-               panic("unable to find compatible rstc node in dtb\n");
+       if (!np) {
+               pr_err("unable to find compatible sirf rstc node in dtb\n");
+               return -ENOENT;
+       }
 
        sirfsoc_rstc_base = of_iomap(np, 0);
        if (!sirfsoc_rstc_base)
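
Both SiRF hunks above replace a boot-time panic() with a recoverable error when the expected device-tree node is missing, which matters once a single multiplatform image can boot on boards that do not have this hardware. A minimal sketch of the same pattern, with illustrative names that are not part of this commit:

#include <linux/of.h>
#include <linux/errno.h>
#include <linux/printk.h>

static const struct of_device_id example_pwrc_ids[] = {
	{ .compatible = "example,power-controller" },
	{}
};

static int __init example_pwrc_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, example_pwrc_ids);
	if (!np) {
		/* Not fatal: this SoC may simply not be present. */
		pr_err("example pwrc: no compatible node in the device tree\n");
		return -ENOENT;
	}

	/* ... map registers, register hooks ... */
	of_node_put(np);
	return 0;
}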
index b85b2882dbd05cc57d644d15f9349abe75cc7293..2b04c8011e13ef30494dfd1d8a92c78331b96043 100644 (file)
@@ -377,7 +377,7 @@ static struct resource sh_eth_resources[] = {
 };
 
 static struct platform_device sh_eth_device = {
-       .name = "sh-eth",
+       .name = "r8a7740-gether",
        .id = -1,
        .dev = {
                .platform_data = &sh_eth_platdata,
index 91052855cc1216245eb71fe14cb13d5e5f85c63d..b9594e911ce7680d28448fa50cf854ceb91b33bf 100644 (file)
@@ -212,8 +212,8 @@ static struct platform_device *marzen_devices[] __initdata = {
 static struct usb_phy *phy;
 static int usb_power_on(struct platform_device *pdev)
 {
-       if (!phy)
-               return -EIO;
+       if (IS_ERR(phy))
+               return PTR_ERR(phy);
 
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
@@ -225,7 +225,7 @@ static int usb_power_on(struct platform_device *pdev)
 
 static void usb_power_off(struct platform_device *pdev)
 {
-       if (!phy)
+       if (IS_ERR(phy))
                return;
 
        usb_phy_shutdown(phy);
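
The Marzen board change moves from a NULL test to the ERR_PTR convention, because usb_get_phy() returns an encoded error pointer rather than NULL when no transceiver is available. The idiom in isolation (a sketch; the device wiring here is hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static struct usb_phy *phy;

static int example_usb_power_on(struct platform_device *pdev)
{
	/* phy was fetched earlier with usb_get_phy(USB_PHY_TYPE_USB2) and
	 * may hold ERR_PTR(-ENODEV) rather than a valid pointer or NULL. */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	usb_phy_init(phy);
	return 0;
}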
index c0d39aa6de5092f0137c0ee187a0376b4edcb00e..ae93f94d15bd65a6c9de847bb95e6d5d2dc0fab1 100644 (file)
@@ -591,7 +591,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("e6860000.sdhi",          &mstp_clks[MSTP313]),
        CLKDEV_DEV_ID("sh_mmcif",               &mstp_clks[MSTP312]),
        CLKDEV_DEV_ID("e6bd0000.mmcif",         &mstp_clks[MSTP312]),
-       CLKDEV_DEV_ID("sh-eth",                 &mstp_clks[MSTP309]),
+       CLKDEV_DEV_ID("r8a7740-gether",         &mstp_clks[MSTP309]),
 
        CLKDEV_DEV_ID("sh_mobile_sdhi.2",       &mstp_clks[MSTP415]),
        CLKDEV_DEV_ID("e6870000.sdhi",          &mstp_clks[MSTP415]),
index cd6855290b1fe27f56ba44dfa11923903b7c17bb..9614b07254b2cc1c2d8f90db3c63273749162f03 100644 (file)
@@ -77,7 +77,7 @@ static struct clk mstp_clks[MSTP_NR] = {
 
 static struct clk_lookup lookups[] = {
        /* MSTP32 clocks */
-       CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */
+       CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
        CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */
        CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */
        CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */
index 31d5cd4d97879f7d9243c9310eb0ea1dd8c7f047..2f7e5245a690f4aebff237cf572a29e7d16b82df 100644 (file)
@@ -163,7 +163,7 @@ static struct clk_lookup lookups[] = {
        /* MSTP32 clocks */
        CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */
        CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */
-       CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */
+       CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
        CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */
        CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */
        CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
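
The three clkdev hunks follow the platform-device rename above: CLKDEV_DEV_ID() ties a clock to its consumer purely by device-name string, so renaming "sh-eth" to "r8a7740-gether" or "r8a777x-ether" without updating the lookup tables would leave the Ethernet block without its MSTP clock. The matching rule, sketched with placeholder clock pointers:

#include <linux/clkdev.h>

/* The dev_id string must equal the consumer's device name, with a
 * ".<id>" suffix appended when the platform device id is not -1. */
static struct clk_lookup example_lookups[] = {
	CLKDEV_DEV_ID("r8a7740-gether", NULL),   /* .id = -1: bare name  */
	CLKDEV_DEV_ID("sh_mobile_sdhi.2", NULL), /* .id = 2: name + ".2" */
};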
index fdf3894b1cc31e0291d2fc00e35bfbe61185f735..9696f36468643956c37466a0e77d5de5292b2e5f 100644 (file)
@@ -252,7 +252,7 @@ static struct sh_timer_config cmt10_platform_data = {
        .name = "CMT10",
        .channel_offset = 0x10,
        .timer_bit = 0,
-       .clockevent_rating = 125,
+       .clockevent_rating = 80,
        .clocksource_rating = 125,
 };
 
index d259c782d742873f0e0c53b1a43e7bcd85069286..5b045e302b4359d65b91ac31110dd566d2e10391 100644 (file)
@@ -1,5 +1,6 @@
 config ARCH_SUNXI
        bool "Allwinner A1X SOCs" if ARCH_MULTI_V7
+       select ARCH_REQUIRE_GPIOLIB
        select CLKSRC_MMIO
        select CLKSRC_OF
        select COMMON_CLK
index 9e8bdfa2b36915e4db7a8352753626e7f41b682e..31e69a019bdd999bd53338d669a090697122922b 100644 (file)
@@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "missing register base\n");
-               return -ENOMEM;
-       }
-
        emc_regbase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(emc_regbase))
                return PTR_ERR(emc_regbase);
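
Dropping the explicit NULL test is safe because devm_ioremap_resource() validates the resource it is handed and returns an ERR_PTR with a diagnostic of its own; the s3c-adc hunk further down makes the same cleanup. A probe built on that helper only needs (sketch, not the actual tegra driver):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() rejects a NULL resource itself */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}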
index 6a4387e39df809f1b21d899b89d2212924ae19cd..b19b07204aafbeb53ced9f4226164ecd9c036cff 100644 (file)
@@ -51,6 +51,7 @@ config MACH_MOP500
        bool "U8500 Development platform, MOP500 versions"
        select I2C
        select I2C_NOMADIK
+       select REGULATOR
        select REGULATOR_FIXED_VOLTAGE
        select SOC_BUS
        select UX500_SOC_DB8500
index 33c353bc1c4acacb8bb02cfea52ab7b1beaec2db..d6b7c8556fa1a79b4bcc0d4d735a1ed88ee24c4f 100644 (file)
@@ -374,6 +374,7 @@ static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
 static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
        /* supplies to the display/camera */
        [AB8500_LDO_AUX1] = {
+               .supply_regulator = "ab8500-ext-supply3",
                .constraints = {
                        .name = "V-DISPLAY",
                        .min_uV = 2800000,
@@ -387,6 +388,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
        },
        /* supplies to the on-board eMMC */
        [AB8500_LDO_AUX2] = {
+               .supply_regulator = "ab8500-ext-supply3",
                .constraints = {
                        .name = "V-eMMC1",
                        .min_uV = 1100000,
@@ -402,6 +404,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
        },
        /* supply for VAUX3, supplies to SDcard slots */
        [AB8500_LDO_AUX3] = {
+               .supply_regulator = "ab8500-ext-supply3",
                .constraints = {
                        .name = "V-MMC-SD",
                        .min_uV = 1100000,
index 3cd555ac6d0a3e5c81478dfbf71f79f10e72b73b..78389de94dde34f6eaee0f73c479d33eda4846dd 100644 (file)
@@ -623,7 +623,7 @@ static void __init mop500_init_machine(void)
        sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL;
 
        mop500_pinmaps_init();
-       parent = u8500_init_devices(&ab8500_platdata);
+       parent = u8500_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
                mop500_platform_devs[i]->dev.parent = parent;
@@ -660,7 +660,7 @@ static void __init snowball_init_machine(void)
        sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO;
 
        snowball_pinmaps_init();
-       parent = u8500_init_devices(&ab8500_platdata);
+       parent = u8500_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
                snowball_platform_devs[i]->dev.parent = parent;
@@ -698,7 +698,7 @@ static void __init hrefv60_init_machine(void)
        sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO;
 
        hrefv60_pinmaps_init();
-       parent = u8500_init_devices(&ab8500_platdata);
+       parent = u8500_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
                mop500_platform_devs[i]->dev.parent = parent;
index e90b5ab23b6daf7e691bbff5b74fd6c165e96518..46cca52890bcfd486f30018556059c12bcf22391 100644 (file)
@@ -206,7 +206,7 @@ static struct device * __init db8500_soc_device_init(void)
 /*
  * This function is called from the board init
  */
-struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
+struct device * __init u8500_init_devices(void)
 {
        struct device *parent;
        int i;
@@ -220,8 +220,6 @@ struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
        for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
                platform_devs[i]->dev.parent = parent;
 
-       db8500_prcmu_device.dev.platform_data = ab8500;
-
        platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
 
        return parent;
@@ -278,7 +276,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
        OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
                        &db8500_prcmu_pdata),
-       OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x", NULL),
+       OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL),
        /* Requires device name bindings. */
        OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
                "pinctrl-db8500", NULL),
index 317a2be129fb12be9b78bcdcb2286d05496ddf6f..a45dd09daed9212c2d799ce1ef9421be8d13bab5 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/proc-fns.h>
 
 #include "db8500-regs.h"
+#include "id.h"
 
 static atomic_t master = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(master_lock);
@@ -114,6 +115,9 @@ static struct cpuidle_driver ux500_idle_driver = {
 
 int __init ux500_idle_init(void)
 {
+       if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
+               return -ENODEV;
+
        /* Configure wake up reasons */
        prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
                             PRCMU_WAKEUP(ABB));
index bddce2b493722f84c5821103a38eea4a85d8833d..cad3ca86c540f7eb67c454ce44f0317b80312ad3 100644 (file)
@@ -18,7 +18,7 @@
 void __init ux500_map_io(void);
 extern void __init u8500_map_io(void);
 
-extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500);
+extern struct device * __init u8500_init_devices(void);
 
 extern void __init ux500_init_irq(void);
 extern void __init ux500_init_late(void);
index 1dd281efc02035dac37cc971a0db4c69c98c8a11..f5c33df7a5971731844478e9a29024c660c35c52 100644 (file)
@@ -173,6 +173,7 @@ static const char * const vt8500_dt_compat[] = {
        "wm,wm8505",
        "wm,wm8750",
        "wm,wm8850",
+       NULL
 };
 
 DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
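
The vt8500 fix adds the missing NULL sentinel: the flattened device-tree matching code walks dt_compat until it finds NULL, so an unterminated array runs off the end of the table. A terminated table looks like this (board strings made up for illustration):

/* requires <asm/mach/arch.h> for DT_MACHINE_START/MACHINE_END */
static const char * const example_dt_compat[] = {
	"vendor,board-a",
	"vendor,board-b",
	NULL	/* sentinel: the compatible walk stops here */
};

DT_MACHINE_START(EXAMPLE_DT, "Example board (Device Tree Support)")
	.dt_compat	= example_dt_compat,
MACHINE_END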
index 1a643ee8e082464108cf9280266fe73016b982be..f50d223a0bd31271ed73beabd60679c137ccb440 100644 (file)
@@ -900,8 +900,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif
 
        alloc_size = 4 * ctx.idx;
-       ctx.target = module_alloc(max(sizeof(struct work_struct),
-                                     alloc_size));
+       ctx.target = module_alloc(alloc_size);
        if (unlikely(ctx.target == NULL))
                goto out;
 
@@ -927,19 +926,8 @@ out:
        return;
 }
 
-static void bpf_jit_free_worker(struct work_struct *work)
-{
-       module_free(NULL, work);
-}
-
 void bpf_jit_free(struct sk_filter *fp)
 {
-       struct work_struct *work;
-
-       if (fp->bpf_func != sk_run_filter) {
-               work = (struct work_struct *)fp->bpf_func;
-
-               INIT_WORK(work, bpf_jit_free_worker);
-               schedule_work(work);
-       }
+       if (fp->bpf_func != sk_run_filter)
+               module_free(NULL, fp->bpf_func);
 }
index 251f827271e918adf62b378d59aef9e7272d8b70..c019b7aaf776b7d622aa5c97146070aee57ef523 100644 (file)
@@ -383,7 +383,7 @@ static struct resource orion_ge10_shared_resources[] = {
 
 static struct platform_device orion_ge10_shared = {
        .name           = MV643XX_ETH_SHARED_NAME,
-       .id             = 1,
+       .id             = 2,
        .dev            = {
                .platform_data  = &orion_ge10_shared_data,
        },
@@ -398,8 +398,8 @@ static struct resource orion_ge10_resources[] = {
 
 static struct platform_device orion_ge10 = {
        .name           = MV643XX_ETH_NAME,
-       .id             = 1,
-       .num_resources  = 2,
+       .id             = 2,
+       .num_resources  = 1,
        .resource       = orion_ge10_resources,
        .dev            = {
                .coherent_dma_mask      = DMA_BIT_MASK(32),
@@ -432,7 +432,7 @@ static struct resource orion_ge11_shared_resources[] = {
 
 static struct platform_device orion_ge11_shared = {
        .name           = MV643XX_ETH_SHARED_NAME,
-       .id             = 1,
+       .id             = 3,
        .dev            = {
                .platform_data  = &orion_ge11_shared_data,
        },
@@ -447,8 +447,8 @@ static struct resource orion_ge11_resources[] = {
 
 static struct platform_device orion_ge11 = {
        .name           = MV643XX_ETH_NAME,
-       .id             = 1,
-       .num_resources  = 2,
+       .id             = 3,
+       .num_resources  = 1,
        .resource       = orion_ge11_resources,
        .dev            = {
                .coherent_dma_mask      = DMA_BIT_MASK(32),
index e06fc5fefa14c9ec7d0b46b002da64bc8cb425f8..d9a24f605a2b786dce8a85744c662f310f7d80a8 100644 (file)
@@ -10,6 +10,7 @@
 
 #ifndef __PLAT_COMMON_H
 #include <linux/mv643xx_eth.h>
+#include <linux/platform_data/usb-ehci-orion.h>
 
 struct dsa_platform_data;
 struct mv_sata_platform_data;
index ca07cb1b155a702ce5e3c0cc0de74cfc5a5fe3a4..79690f2f6d3f8a5164495c7758b392cc43bae268 100644 (file)
@@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_err(dev, "failed to find registers\n");
-               return -ENXIO;
-       }
-
        adc->regs = devm_ioremap_resource(dev, regs);
        if (IS_ERR(adc->regs))
                return PTR_ERR(adc->regs);
index 30c2fe243f7658c0002c93ce4c9aaf154e1ca1db..0f9c3f431a5f6d1b1c0874febb98f06504f650aa 100644 (file)
@@ -311,9 +311,9 @@ struct platform_device s5p_device_jpeg = {
 #ifdef CONFIG_S5P_DEV_FIMD0
 static struct resource s5p_fimd0_resource[] = {
        [0] = DEFINE_RES_MEM(S5P_PA_FIMD0, SZ_32K),
-       [1] = DEFINE_RES_IRQ(IRQ_FIMD0_VSYNC),
-       [2] = DEFINE_RES_IRQ(IRQ_FIMD0_FIFO),
-       [3] = DEFINE_RES_IRQ(IRQ_FIMD0_SYSTEM),
+       [1] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_VSYNC, "vsync"),
+       [2] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_FIFO, "fifo"),
+       [3] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_SYSTEM, "lcd_sys"),
 };
 
 struct platform_device s5p_device_fimd0 = {
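
Giving the three FIMD interrupts names lets a driver request them by name rather than by position in the resource array. The consumer side would look roughly like this (a sketch assuming a driver that wants the "vsync" line; only the resource names mirror the board file above):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_vsync_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq;

	/* matches DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_VSYNC, "vsync") above */
	irq = platform_get_irq_byname(pdev, "vsync");
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_vsync_isr, 0,
				dev_name(&pdev->dev), NULL);
}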
index 438b24846e7f3b4505ac22364c0c702c6f594c27..02b66d723d1a04b46dc288f88b0c0a710bc52350 100644 (file)
@@ -66,6 +66,9 @@ uart_rd(unsigned int reg)
 
 static void putc(int ch)
 {
+       if (!config_enabled(CONFIG_DEBUG_LL))
+               return;
+
        if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) {
                int level;
 
@@ -118,7 +121,12 @@ static void arch_decomp_error(const char *x)
 #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO
 static inline void arch_enable_uart_fifo(void)
 {
-       u32 fifocon = uart_rd(S3C2410_UFCON);
+       u32 fifocon;
+
+       if (!config_enabled(CONFIG_DEBUG_LL))
+               return;
+
+       fifocon = uart_rd(S3C2410_UFCON);
 
        if (!(fifocon & S3C2410_UFCON_FIFOMODE)) {
                fifocon |= S3C2410_UFCON_RESETBOTH;
index 53210ec4e8ece7fdbe2f1eb7893692fd15569f38..bd7124c87fea2d4b3551eed54752128b4c97fd71 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>
 
@@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state)
         * require a full power-cycle)
        */
 
-       if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
+       if (!of_have_populated_dt() &&
+           !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
            !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
                printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
                printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state)
 
        /* save all necessary core registers not covered by the drivers */
 
-       samsung_pm_save_gpios();
-       samsung_pm_saved_gpios();
+       if (!of_have_populated_dt()) {
+               samsung_pm_save_gpios();
+               samsung_pm_saved_gpios();
+       }
+
        s3c_pm_save_uarts();
        s3c_pm_save_core();
 
@@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state)
 
        s3c_pm_restore_core();
        s3c_pm_restore_uarts();
-       samsung_pm_restore_gpios();
-       s3c_pm_restored_gpios();
+
+       if (!of_have_populated_dt()) {
+               samsung_pm_restore_gpios();
+               s3c_pm_restored_gpios();
+       }
 
        s3c_pm_debug_init();
 
index 323ce1a62bbfa3465230632d5d7d35c458f380fa..46e17492fd1f3ecccda92c660474c805707b788b 100644 (file)
@@ -60,7 +60,7 @@ ENTRY(vfp_testing_entry)
        str     r11, [r10, #TI_PREEMPT]
 #endif
        ldr     r0, VFP_arch_address
-       str     r5, [r0]                @ known non-zero value
+       str     r0, [r0]                @ set to non-zero value
        mov     pc, r9                  @ we have handled the fault
 ENDPROC(vfp_testing_entry)
 
index d30042e39974f949e6ab3cc81f88f7c6dfc51c4d..13609e01f4b786293219f2c9e865837c9262f76e 100644 (file)
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
 {
        struct vcpu_register_vcpu_info info;
        struct vcpu_info *vcpup;
        int err;
+       int cpu = get_cpu();
 
        pr_info("Xen: initializing cpu%d\n", cpu);
        vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
        info.offset = offset_in_page(vcpup);
 
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-       if (err) {
-               pr_debug("register_vcpu_info failed: err=%d\n", err);
-       } else {
-               /* This cpu is using the registered vcpu info, even if
-                  later ones fail to. */
-               per_cpu(xen_vcpu, cpu) = vcpup;
-       }
-       return 0;
+       BUG_ON(err);
+       per_cpu(xen_vcpu, cpu) = vcpup;
+
+       enable_percpu_irq(xen_events_irq, 0);
 }
 
 static void xen_restart(char str, const char *cmd)
@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
        const char *version = NULL;
        const char *xen_prefix = "xen,xen-";
        struct resource res;
-       int i;
 
        node = of_find_compatible_node(NULL, NULL, "xen,xen");
        if (!node) {
@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
                                               sizeof(struct vcpu_info));
        if (xen_vcpu_info == NULL)
                return -ENOMEM;
-       for_each_online_cpu(i)
-               xen_secondary_init(i);
 
        gnttab_init();
        if (!xen_initial_domain())
                xenbus_probe(NULL);
 
+       return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
        pm_power_off = xen_power_off;
        arm_pm_restart = xen_restart;
 
        return 0;
 }
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);
 
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static __init void xen_percpu_enable_events(void *unused)
-{
-       enable_percpu_irq(xen_events_irq, 0);
-}
-
 static int __init xen_init_events(void)
 {
        if (!xen_domain() || xen_events_irq < 0)
@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
                return -EINVAL;
        }
 
-       on_each_cpu(xen_percpu_enable_events, NULL, 0);
+       on_each_cpu(xen_percpu_init, NULL, 0);
 
        return 0;
 }
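
The Xen rework folds per-CPU vcpu_info registration and the per-CPU event IRQ enable into one helper that is executed on every online CPU through on_each_cpu(), and defers the power-off/restart hooks to a later subsys_initcall. The on_each_cpu() idiom, in a self-contained sketch with illustrative names:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void example_percpu_setup(void *unused)
{
	int cpu = get_cpu();	/* runs with preemption disabled */

	pr_info("setting up cpu%d\n", cpu);
	/* ... per-CPU registration work ... */

	put_cpu();
}

static int __init example_init(void)
{
	/* run the setup on every online CPU and wait for completion */
	on_each_cpu(example_percpu_setup, NULL, 1);
	return 0;
}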
index 48347dcf056681641936a8cedcfeb88272fc8f7e..56b3f6d447ae10b8d53f9cfb767826fe90a97c11 100644 (file)
@@ -122,8 +122,6 @@ endmenu
 
 menu "Kernel Features"
 
-source "kernel/time/Kconfig"
-
 config ARM64_64K_PAGES
        bool "Enable 64KB pages support"
        help
index c8eedc6049844dbb0d2142fa9aca90478e163025..5aceb83b3f5c3c5dd9dc1168a157bb0e41f1c8e9 100644 (file)
@@ -82,7 +82,7 @@
 
        .macro  enable_dbg_if_not_stepping, tmp
        mrs     \tmp, mdscr_el1
-       tbnz    \tmp, #1, 9990f
+       tbnz    \tmp, #0, 9990f
        enable_dbg
 9990:
        .endm
index 7df1aad29b676d38f085d9ca8be52a2d0c960431..41b4f626d5548c10985313839892de44ad81e1ec 100644 (file)
@@ -34,6 +34,7 @@ EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 
 EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
 
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
index 0c3ba9f51376d33656607c67f18e135895357079..f4726dc054b3bbcdd7c7a5d98d3733b6a893ea3e 100644 (file)
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
  */
 static void clear_os_lock(void *unused)
 {
-       asm volatile("msr mdscr_el1, %0" : : "r" (0));
-       isb();
        asm volatile("msr oslar_el1, %0" : : "r" (0));
        isb();
 }
index ac974f48a7a25145cd8024a7c8cb77a21cdccad2..fbb6e18436598142cf0e4ea7429b63965f2ada4b 100644 (file)
@@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n)
        }
 }
 
-static struct console early_console = {
+static struct console early_console_dev = {
        .name =         "earlycon",
        .write =        early_write,
        .flags =        CON_PRINTBUFFER | CON_BOOT,
@@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf)
                early_base = early_io_map(paddr, EARLYCON_IOBASE);
 
        printch = match->printch;
-       register_console(&early_console);
+       early_console = &early_console_dev;
+       register_console(&early_console_dev);
 
        return 0;
 }
index c7e047049f2c1e26db2534478b07db95ec29ede7..1d1314280a03a678c4ef63092712d49079386cdd 100644 (file)
@@ -390,6 +390,16 @@ el0_sync_compat:
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP15_32        // CP15 MRC/MCR trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP15_64        // CP15 MRRC/MCRR trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP14_MR        // CP14 MRC/MCR trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP14_LS        // CP14 LDC/STC trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP14_64        // CP14 MRRC/MCRR trap
+       b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL0    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
index 6a9a5329259065a99ceea12c3fe391f5996c448b..add6ea616843139ff81a65f26c344ed9e69fd66d 100644 (file)
@@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
 }
 
-static int __init arm64_of_clk_init(void)
+static int __init arm64_device_init(void)
 {
        of_clk_init(NULL);
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
        return 0;
 }
-arch_initcall(arm64_of_clk_init);
+arch_initcall(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
@@ -305,13 +306,6 @@ static int __init topology_init(void)
 }
 subsys_initcall(topology_init);
 
-static int __init arm64_device_probe(void)
-{
-       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-       return 0;
-}
-device_initcall(arm64_device_probe);
-
 static const char *hwcap_str[] = {
        "fp",
        "asimd",
index 61d7dd29f756c300926410cef60ab523a5a7a7b1..f30852d28590358c6780a22c14049f92a124bdb8 100644 (file)
@@ -267,7 +267,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
                return;
 #endif
 
-       if (show_unhandled_signals) {
+       if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
+           printk_ratelimit()) {
                pr_info("%s[%d]: undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                dump_instr(KERN_INFO, regs);
@@ -294,7 +295,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
        }
 #endif
 
-       if (show_unhandled_signals) {
+       if (show_unhandled_signals && printk_ratelimit()) {
                pr_info("%s[%d]: syscall %d\n", current->comm,
                        task_pid_nr(current), (int)regs->syscallno);
                dump_instr("", regs);
@@ -310,14 +311,20 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+       siginfo_t info;
+       void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
        pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
                handler[reason], esr);
+       __show_regs(regs);
+
+       info.si_signo = SIGILL;
+       info.si_errno = 0;
+       info.si_code  = ILL_ILLOPC;
+       info.si_addr  = pc;
 
-       die("Oops - bad mode", regs, 0);
-       local_irq_disable();
-       panic("bad mode");
+       arm64_notify_die("Oops - bad mode", regs, &info, 0);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
index abe69b80cf7f674d9eb25b884a33e78a223aeb65..48a386094fa3cf98a7e8af3ae8d3b9ba5cce6c21 100644 (file)
@@ -52,7 +52,7 @@ loop1:
        add     x2, x2, #4                      // add 4 (line length offset)
        mov     x4, #0x3ff
        and     x4, x4, x1, lsr #3              // find maximum number on the way size
-       clz     x5, x4                          // find bit position of way size increment
+       clz     w5, w4                          // find bit position of way size increment
        mov     x7, #0x7fff
        and     x7, x7, x1, lsr #13             // extract max number of the index size
 loop2:
index 98af6e760cce6781a5b117d786bcd16fc136a0b1..1426468b77f3bb7b6df96d604851449db13ed0da 100644 (file)
@@ -113,7 +113,8 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
        struct siginfo si;
 
-       if (show_unhandled_signals) {
+       if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
+           printk_ratelimit()) {
                pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
                        tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
                        addr, esr);
index f1d8b9bbfdadc5a64a63d781051e6675aa39e322..a82ae8868077f9f32749ccfcc490d166d6fddd1c 100644 (file)
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)
 
        mov     x0, #3 << 20
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
-       mov     x0, #1
-       msr     oslar_el1, x0                   // Set the debug OS lock
+       msr     mdscr_el1, xzr                  // Reset mdscr_el1
        tlbi    vmalle1is                       // invalidate I + D TLBs
        /*
         * Memory region attributes for LPAE:
index bdc35589277f721805eed5675fe0bfabd8dc0aec..549903cfc2cbe6f223025ff1b04017939f7dd336 100644 (file)
@@ -205,6 +205,11 @@ config ARCH_DISCONTIGMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
        def_bool n
 
+config NODES_SHIFT
+       int
+       default "2"
+       depends on NEED_MULTIPLE_NODES
+
 source "mm/Kconfig"
 
 config OWNERSHIP_TRACE
index 4dd4f78d3dcc80a0799a96ab7fc629fe09b728ac..d22af851f3f638b09fdc4394a05e2cebf8a10201 100644 (file)
@@ -2,3 +2,4 @@
 generic-y      += clkdev.h
 generic-y      += exec.h
 generic-y      += trace_clock.h
+generic-y      += param.h
diff --git a/arch/avr32/include/asm/numnodes.h b/arch/avr32/include/asm/numnodes.h
deleted file mode 100644 (file)
index 0b864d7..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_AVR32_NUMNODES_H
-#define __ASM_AVR32_NUMNODES_H
-
-/* Max 4 nodes */
-#define NODES_SHIFT    2
-
-#endif /* __ASM_AVR32_NUMNODES_H */
diff --git a/arch/avr32/include/asm/param.h b/arch/avr32/include/asm/param.h
deleted file mode 100644 (file)
index 009a167..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_AVR32_PARAM_H
-#define __ASM_AVR32_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ            CONFIG_HZ
-# define USER_HZ       100             /* User interfaces are in "ticks" */
-# define CLOCKS_PER_SEC        (USER_HZ)       /* frequency at which times() counts */
-#endif /* __ASM_AVR32_PARAM_H */
index df53e7a467740a34846a4852c93122f3ef82a18e..3b85eaddf525f2be65d5772be3f28051afac7b43 100644 (file)
@@ -33,3 +33,4 @@ header-y += termbits.h
 header-y += termios.h
 header-y += types.h
 header-y += unistd.h
+generic-y += param.h
diff --git a/arch/avr32/include/uapi/asm/param.h b/arch/avr32/include/uapi/asm/param.h
deleted file mode 100644 (file)
index d28aa5e..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _UAPI__ASM_AVR32_PARAM_H
-#define _UAPI__ASM_AVR32_PARAM_H
-
-
-#ifndef HZ
-# define HZ            100
-#endif
-
-/* TODO: Should be configurable */
-#define EXEC_PAGESIZE  4096
-
-#ifndef NOGROUP
-# define NOGROUP       (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64
-
-#endif /* _UAPI__ASM_AVR32_PARAM_H */
index 37401f535126695e22af87b35739aba1242dd219..79b61798ebf8abce4d89150296ab879280933a9e 100644 (file)
@@ -74,4 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* __ASM_AVR32_SOCKET_H */
index 596f7305d93f017c43af7fc5f965ea4d0b988246..2c9412908024d4ce88d8945cf953b0d24437dcaf 100644 (file)
@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
                        break;
                case R_AVR32_GOT18SW:
                        if ((relocation & 0xfffe0003) != 0
-                           && (relocation & 0xfffc0003) != 0xffff0000)
+                           && (relocation & 0xfffc0000) != 0xfffc0000)
                                return reloc_overflow(module, "R_AVR32_GOT18SW",
                                                     relocation);
                        relocation >>= 2;
index 5f2cdb3e428cd979601b6b15d5e04a24042c7a8e..daf5f19b61a12bd54e23acd09a0db76132228993 100644 (file)
@@ -2,9 +2,7 @@ if ETRAX_ARCH_V10
 
 config ETRAX_ETHERNET
        bool "Ethernet support"
-       depends on ETRAX_ARCH_V10
-       select ETHERNET
-       select NET_CORE
+       depends on ETRAX_ARCH_V10 && NETDEVICES
        select MII
        help
          This option enables the ETRAX 100LX built-in 10/100Mbit Ethernet
index c55971a40c34e8433faa9fc70d12566feed21045..4f223506a2aa8b837e39622ebc37ce942d91a632 100644 (file)
@@ -2,9 +2,7 @@ if ETRAX_ARCH_V32
 
 config ETRAX_ETHERNET
        bool "Ethernet support"
-       depends on ETRAX_ARCH_V32
-       select ETHERNET
-       select NET_CORE
+       depends on ETRAX_ARCH_V32 && NETDEVICES
        select MII
        help
          This option enables the ETRAX FS built-in 10/100Mbit Ethernet
index ba409c9947bc2df933ca1d393b418f082424a57a..47b1ec55092d62cb6df800a7c0ef5250c016ebbd 100644 (file)
@@ -76,6 +76,8 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_SOCKET_H */
 
 
index 31dbb5d8e13debd569420493091189e5bc199aef..dbc08520f22c581cfd20d3376d40b590486caf18 100644 (file)
@@ -74,5 +74,7 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_SOCKET_H */
 
index 5d1c6d0870e653e1f2cbfecf6e9ad7f6cf180741..a38d38a6520bfcc58fb3b4729cb1e38ecc37aa90 100644 (file)
@@ -74,4 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_SOCKET_H */
index c13064e422df6a28a1e544848376436bf4b8b54a..d1b04c4c95e308061c1292d09f0e6642489abdd7 100644 (file)
@@ -268,7 +268,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
 static int
 simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct simeth_local *local;
        struct in_device *in_dev;
        struct in_ifaddr **ifap = NULL;
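
simeth is adapted to the netdevice-notifier change in this cycle: the callback no longer receives the struct net_device directly but a netdev_notifier_info wrapper, so the device has to be extracted with netdev_notifier_info_to_dev(). A minimal notifier under the new convention (illustrative, not the simeth code):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* registered from module init with:
 *	register_netdevice_notifier(&example_netdev_notifier);
 */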
index c3ffe3e54edc1a0457359b2c198d555330d09bf7..ef3a9de01954511a352fa5b24285136789425e21 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)    ((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)    (1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
 
 struct mmu_gather {
        struct mm_struct        *mm;
-       unsigned int            nr;             /* == ~0U => fast mode */
+       unsigned int            nr;
        unsigned int            max;
        unsigned char           fullmm;         /* non-zero means full mm flush */
        unsigned char           need_flush;     /* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+       unsigned long i;
        unsigned int nr;
 
        if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 
        /* lastly, release the freed pages */
        nr = tlb->nr;
-       if (!tlb_fast_mode(tlb)) {
-               unsigned long i;
-               tlb->nr = 0;
-               tlb->start_addr = ~0UL;
-               for (i = 0; i < nr; ++i)
-                       free_page_and_swap_cache(tlb->pages[i]);
-       }
+
+       tlb->nr = 0;
+       tlb->start_addr = ~0UL;
+       for (i = 0; i < nr; ++i)
+               free_page_and_swap_cache(tlb->pages[i]);
 }
 
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
-       /*
-        * Use fast mode if only 1 CPU is online.
-        *
-        * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
-        * doesn't work because of speculative accesses and software prefetching: the page
-        * table of "mm" may (and usually is) the currently active page table and even
-        * though the kernel won't do any user-space accesses during the TLB shoot down, a
-        * compiler might use speculation or lfetch.fault on what happens to be a valid
-        * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
-        * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
-        * problems.  (We could make fast-mode work by switching the current task to a
-        * different "mm" during the shootdown.) --davidm 08/02/2002
-        */
-       tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+       tlb->nr = 0;
        tlb->fullmm = full_mm_flush;
        tlb->start_addr = ~0UL;
 }
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
        tlb->need_flush = 1;
 
-       if (tlb_fast_mode(tlb)) {
-               free_page_and_swap_cache(page);
-               return 1; /* avoid calling tlb_flush_mmu */
-       }
-
        if (!tlb->nr && tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
 
index 6b4329f18b29f3878c7679c8a32a4a60d931904d..d3358b7606819bcc733affa863febe620ad90f4b 100644 (file)
@@ -83,4 +83,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_IA64_SOCKET_H */
index 2a3b59e0e171d0fb19897f561028b1f0482180b3..44aaf4639a4a0c5c9364ec4e609200b9be5e553e 100644 (file)
@@ -74,4 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_M32R_SOCKET_H */
index 90d3109c82f402df0356d43be23d9d965af68630..19325e117eeaaec7b844e1e50d328719077e3261 100644 (file)
@@ -1,55 +1,78 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-amiga"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_AMIGA=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
+CONFIG_AMIGA=y
 CONFIG_ZORRO=y
 CONFIG_AMIGA_PCMCIA=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_ZORRO_NAMES=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -57,25 +80,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -86,6 +121,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -99,22 +136,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -124,7 +170,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -133,18 +178,30 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_AMIGA=m
@@ -154,11 +211,13 @@ CONFIG_AMIGA_FLOPPY=y
 CONFIG_AMIGA_Z2RAM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_GAYLE=y
 CONFIG_BLK_DEV_BUDDHA=y
@@ -172,57 +231,77 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_A3000_SCSI=y
 CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_3COM is not set
 CONFIG_A2065=y
+CONFIG_ARIADNE=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_HYDRA=y
-CONFIG_ZORRO8390=y
 CONFIG_APNE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_AMIGA=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
@@ -233,11 +312,14 @@ CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
@@ -252,48 +334,64 @@ CONFIG_SOUND=m
 CONFIG_DMASOUND_PAULA=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
 CONFIG_AMIGA_BUILTIN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -332,10 +430,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -345,19 +456,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -373,6 +481,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 8f4f657fdbc67987daf0bcba1948390e43f6c163..14dc6ccda7f45349c82c0dd1f0d113831b3ac07b 100644 (file)
@@ -1,55 +1,76 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-apollo"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_APOLLO=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_APOLLO=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -57,25 +78,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -86,6 +119,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -99,22 +134,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -124,7 +168,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -133,21 +176,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -162,57 +218,74 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -221,47 +294,61 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_CLUT224 is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -300,10 +387,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -313,19 +413,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -341,6 +438,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 4571d33903fed1c1a1cc75f165e0c250e598183b..6d5370c914b265123b6963df409ca3b82d17f8cf 100644
@@ -1,53 +1,75 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-atari"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_ATARI=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_ATARI=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_STRAM_PROC=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -55,25 +77,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -84,6 +118,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -97,22 +133,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -122,7 +167,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -131,18 +175,30 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_ATARI=m
@@ -150,11 +206,13 @@ CONFIG_PARPORT_1284=y
 CONFIG_ATARI_FLOPPY=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_FALCON_IDE=y
 CONFIG_RAID_ATTRS=m
@@ -167,63 +225,81 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_ATARI_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
-CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
 CONFIG_ATARILANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATARI=y
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_ATARI=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_ATARI=y
@@ -233,47 +309,64 @@ CONFIG_SOUND=m
 CONFIG_DMASOUND_ATARI=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_UHID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -312,10 +405,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -325,19 +431,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -353,6 +456,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 12f211733ba02e8f58aa121e777b4b99c7cd6631..c015ddb6fd80635c81affec06369ced3f62185f8 100644
@@ -1,53 +1,74 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-bvme6000"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_VME=y
-CONFIG_BVME6000=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_BVME6000=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -55,25 +76,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -84,6 +117,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -97,22 +132,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -122,7 +166,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -131,21 +174,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -160,103 +216,131 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_BVME6000_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_BVME6000_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -295,10 +379,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -308,19 +405,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -336,7 +430,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
-CONFIG_CRC32=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 215389a5407fa215af1501ed2ea7d9a4d95fca5a..ec7382d8afff5390a4210a45ba348637605c4f59 100644
@@ -1,54 +1,76 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-hp300"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_HP300=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_HP300=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -56,25 +78,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -85,6 +119,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -98,22 +134,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -123,7 +168,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -132,21 +176,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -161,59 +218,77 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_HPLANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_INPUT_MISC=y
 CONFIG_HP_SDC_RTC=m
-# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_SERPORT=m
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -222,47 +297,60 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -301,10 +389,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -314,19 +415,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -342,6 +440,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index cb9dfb30b6747c1fb472c0292af103021f23f001..7d46fbec70424dc84fbe07a9ab1e77c7d1f2bec2 100644
@@ -1,49 +1,75 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-mac"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MAC=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_MAC=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -51,25 +77,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -80,6 +118,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -93,22 +133,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -118,7 +167,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -127,31 +175,45 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_IPDDP_DECAP=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
-CONFIG_BLK_DEV_SWIM=y
+CONFIG_BLK_DEV_SWIM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_MAC_IDE=y
 CONFIG_RAID_ATTRS=m
@@ -164,29 +226,30 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_ADB=y
 CONFIG_ADB_MACII=y
-CONFIG_ADB_MACIISI=y
 CONFIG_ADB_IOP=y
 CONFIG_ADB_PMU68K=y
 CONFIG_ADB_CUDA=y
@@ -194,46 +257,61 @@ CONFIG_INPUT_ADBHID=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MAC8390=y
-CONFIG_MAC89x0=m
-CONFIG_MACSONIC=m
 CONFIG_MACMACE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_MACSONIC=y
+CONFIG_MAC8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
 CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_PMACZILOG=y
 CONFIG_SERIAL_PMACZILOG_TTYS=y
 CONFIG_SERIAL_PMACZILOG_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_VALKYRIE=y
@@ -242,46 +320,60 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=y
-CONFIG_HFSPLUS_FS=y
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
+CONFIG_NFS_FS=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -320,10 +412,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -333,19 +438,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -361,6 +463,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 8d5def4a31e026e657ea78429a3fa36ac3d4fe42..0f795d8e65fafbc28b9d765911ac2416db5f3c6c 100644
@@ -1,15 +1,29 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-multi"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_M68020=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_M68KFPU_EMU=y
 CONFIG_AMIGA=y
 CONFIG_ATARI=y
 CONFIG_MAC=y
@@ -21,48 +35,50 @@ CONFIG_BVME6000=y
 CONFIG_HP300=y
 CONFIG_SUN3X=y
 CONFIG_Q40=y
-CONFIG_M68020=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
 CONFIG_ZORRO=y
 CONFIG_AMIGA_PCMCIA=y
-CONFIG_STRAM_PROC=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_ZORRO_NAMES=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -70,25 +86,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -99,6 +127,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -112,22 +142,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -137,7 +176,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -146,22 +184,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_IPDDP_DECAP=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_AMIGA=m
@@ -170,15 +220,17 @@ CONFIG_PARPORT_ATARI=m
 CONFIG_PARPORT_1284=y
 CONFIG_AMIGA_FLOPPY=y
 CONFIG_ATARI_FLOPPY=y
-CONFIG_BLK_DEV_SWIM=y
+CONFIG_BLK_DEV_SWIM=m
 CONFIG_AMIGA_Z2RAM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_GAYLE=y
 CONFIG_BLK_DEV_BUDDHA=y
@@ -195,11 +247,9 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_A3000_SCSI=y
 CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
@@ -213,21 +263,24 @@ CONFIG_MVME16x_SCSI=y
 CONFIG_BVME6000_SCSI=y
 CONFIG_SUN3X_ESP=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_ADB=y
 CONFIG_ADB_MACII=y
-CONFIG_ADB_MACIISI=y
 CONFIG_ADB_IOP=y
 CONFIG_ADB_PMU68K=y
 CONFIG_ADB_CUDA=y
@@ -235,49 +288,64 @@ CONFIG_INPUT_ADBHID=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
-CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
-CONFIG_ARIADNE=y
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
 CONFIG_A2065=y
-CONFIG_HYDRA=y
-CONFIG_ZORRO8390=y
-CONFIG_APNE=y
-CONFIG_MAC8390=y
-CONFIG_MAC89x0=y
-CONFIG_MACSONIC=y
-CONFIG_MACMACE=y
-CONFIG_MVME147_NET=y
-CONFIG_MVME16x_NET=y
-CONFIG_BVME6000_NET=y
+CONFIG_ARIADNE=y
 CONFIG_ATARILANCE=y
-CONFIG_SUN3LANCE=y
 CONFIG_HPLANCE=y
+CONFIG_MVME147_NET=y
+CONFIG_SUN3LANCE=y
+CONFIG_MACMACE=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_BVME6000_NET=y
+CONFIG_MVME16x_NET=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_MACSONIC=y
+CONFIG_HYDRA=y
+CONFIG_MAC8390=y
 CONFIG_NE2000=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_APNE=y
+CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_AMIGA=y
 CONFIG_KEYBOARD_ATARI=y
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_MOUSE_AMIGA=m
 CONFIG_MOUSE_ATARI=m
@@ -285,18 +353,20 @@ CONFIG_INPUT_JOYSTICK=y
 CONFIG_JOYSTICK_AMIGA=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
-CONFIG_HP_SDC_RTC=y
-# CONFIG_SERIO_SERPORT is not set
+CONFIG_HP_SDC_RTC=m
 CONFIG_SERIO_Q40KBD=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_PMACZILOG=y
 CONFIG_SERIAL_PMACZILOG_TTYS=y
 CONFIG_SERIAL_PMACZILOG_CONSOLE=y
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
@@ -316,7 +386,20 @@ CONFIG_DMASOUND_PAULA=m
 CONFIG_DMASOUND_Q40=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_AMIGA_BUILTIN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
@@ -324,42 +407,49 @@ CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=y
-CONFIG_HFSPLUS_FS=y
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -398,10 +488,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -411,19 +514,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -439,6 +539,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index e2af46f530c1c589d94e429149a78bc934f7d9f3..5586c6529fce367fb22061b87c708b9ba10a1f61 100644 (file)
@@ -1,52 +1,73 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-mvme147"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_M68030=y
 CONFIG_VME=y
 CONFIG_MVME147=y
-CONFIG_M68030=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -54,25 +75,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -83,6 +116,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -96,22 +131,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -121,7 +165,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -130,21 +173,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -159,103 +215,132 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MVME147_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_MVME147_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -294,10 +379,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -307,19 +405,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -335,6 +430,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 7c9402b2097fcb89b81202970bb1bc88a6ef90d4..e5e8262bbacdd0a94c4f0b1e0e89c06166ddd8ec 100644 (file)
@@ -1,53 +1,74 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-mvme16x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_VME=y
-CONFIG_MVME16x=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_MVME16x=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -55,25 +76,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -84,6 +117,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -97,22 +132,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -122,7 +166,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -131,21 +174,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -160,103 +216,131 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MVME16x_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_MVME16x_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -295,10 +379,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -308,19 +405,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -336,6 +430,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 19d23db690a4789bcf3d9369850170bd6c9f40bd..8982370e8b42166a96637687c0aa9bbe0ddc5731 100644 (file)
@@ -1,49 +1,74 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-q40"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_Q40=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_Q40=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -51,25 +76,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -80,6 +117,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -93,22 +132,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -118,7 +166,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -127,26 +174,40 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_Q40IDE=y
 CONFIG_RAID_ATTRS=m
@@ -159,61 +220,82 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_NE2000=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_Q40KBD=m
+CONFIG_SERIO_Q40KBD=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -222,46 +304,61 @@ CONFIG_SOUND=m
 CONFIG_DMASOUND_Q40=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -300,10 +397,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -313,19 +423,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -341,6 +448,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index ca6c0b4cab7754be95b0ed9bf8564cd243287abb..54674d61e00141069e1fb5113e22e19784fc5c01 100644 (file)
@@ -1,50 +1,71 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-sun3"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_SUN3=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -52,25 +73,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -81,6 +114,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -94,22 +129,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -119,7 +163,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -128,21 +171,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -157,107 +213,136 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_SUN3_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_CADENCE is not set
 CONFIG_SUN3_82586=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -296,10 +381,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -309,19 +407,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -337,6 +432,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index c80941c7759e2f7530c61b2b3a73545dea1892af..832d9539f44194faff9d4e6940223f1d938850fc 100644 (file)
@@ -1,50 +1,71 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-sun3x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_SUN3X=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -52,25 +73,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -81,6 +114,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -94,22 +129,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -119,7 +163,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -128,21 +171,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -157,106 +213,136 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_SUN3X_ESP=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_SUN3LANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -295,10 +381,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -308,19 +407,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -336,6 +432,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index c7933e41f10d7a5d864731467bc6435d0eb0b5a5..09d77a862da3d961029bdbd260557a33f472523f 100644 (file)
@@ -6,7 +6,6 @@ generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
-generic-y += futex.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
new file mode 100644 (file)
index 0000000..bc868af
--- /dev/null
@@ -0,0 +1,94 @@
+#ifndef _ASM_M68K_FUTEX_H
+#define _ASM_M68K_FUTEX_H
+
+#ifdef __KERNEL__
+#if !defined(CONFIG_MMU)
+#include <asm-generic/futex.h>
+#else  /* CONFIG_MMU */
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
+{
+       u32 val;
+
+       if (unlikely(get_user(val, uaddr) != 0))
+               return -EFAULT;
+
+       if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+               return -EFAULT;
+
+       *uval = val;
+
+       return 0;
+}
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+       int op = (encoded_op >> 28) & 7;
+       int cmp = (encoded_op >> 24) & 15;
+       int oparg = (encoded_op << 8) >> 20;
+       int cmparg = (encoded_op << 20) >> 20;
+       int oldval, ret;
+       u32 tmp;
+
+       if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+               oparg = 1 << oparg;
+
+       pagefault_disable();    /* implies preempt_disable() */
+
+       ret = -EFAULT;
+       if (unlikely(get_user(oldval, uaddr) != 0))
+               goto out_pagefault_enable;
+
+       ret = 0;
+       tmp = oldval;
+
+       switch (op) {
+       case FUTEX_OP_SET:
+               tmp = oparg;
+               break;
+       case FUTEX_OP_ADD:
+               tmp += oparg;
+               break;
+       case FUTEX_OP_OR:
+               tmp |= oparg;
+               break;
+       case FUTEX_OP_ANDN:
+               tmp &= ~oparg;
+               break;
+       case FUTEX_OP_XOR:
+               tmp ^= oparg;
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+               ret = -EFAULT;
+
+out_pagefault_enable:
+       pagefault_enable();     /* subsumes preempt_enable() */
+
+       if (ret == 0) {
+               switch (cmp) {
+               case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+               case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+               case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+               case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+               case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+               case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+               default: ret = -ENOSYS;
+               }
+       }
+       return ret;
+}
+
+#endif /* CONFIG_MMU */
+#endif /* __KERNEL__ */
+#endif /* _ASM_M68K_FUTEX_H */
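
The new helper decodes four fields packed into a single encoded_op word: the op in bits 28..30 (bit 31 flags an oparg shift), the comparison in bits 24..27, and two sign-extended 12-bit arguments. A minimal user-space sketch of that packing and decode, assuming the standard FUTEX_OP() layout; the packed constants below are illustrative, not taken from any real caller:

    #include <stdio.h>

    int main(void)
    {
            /* op = FUTEX_OP_ADD (1), cmp = FUTEX_OP_CMP_EQ (0), oparg = 5, cmparg = 3 */
            int encoded_op = (1 << 28) | (0 << 24) | (5 << 12) | 3;

            int op     = (encoded_op >> 28) & 7;
            int cmp    = (encoded_op >> 24) & 15;
            int oparg  = (encoded_op << 8) >> 20;    /* sign-extends the 12-bit field */
            int cmparg = (encoded_op << 20) >> 20;   /* sign-extends the 12-bit field */

            /* prints: op=1 cmp=0 oparg=5 cmparg=3 */
            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
            return 0;
    }
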
index 8cc83431805b73eb44d777d8749e6fc3de66fbbe..2f6eec1e34b416e6791b2892958e192b6a95414c 100644 (file)
@@ -86,6 +86,7 @@ static inline int gpio_cansleep(unsigned gpio)
        return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
 }
 
+#ifndef CONFIG_GPIOLIB
 static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
 {
        int err;
@@ -105,5 +106,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha
 
        return err;
 }
-
+#endif /* !CONFIG_GPIOLIB */
 #endif
index d197e7ff62c5635535db183ac46077d1478eacf9..ac85f16534af929c33d0f1daf1f3a565488b6af4 100644 (file)
@@ -2752,11 +2752,9 @@ func_return      get_new_page
 #ifdef CONFIG_MAC
 
 L(scc_initable_mac):
-       .byte   9,12            /* Reset */
        .byte   4,0x44          /* x16, 1 stopbit, no parity */
        .byte   3,0xc0          /* receiver: 8 bpc */
        .byte   5,0xe2          /* transmitter: 8 bpc, assert dtr/rts */
-       .byte   9,0             /* no interrupts */
        .byte   10,0            /* NRZ */
        .byte   11,0x50         /* use baud rate generator */
        .byte   12,1,13,0       /* 38400 baud */
@@ -2899,6 +2897,7 @@ func_start        serial_init,%d0/%d1/%a0/%a1
        is_not_mac(L(serial_init_not_mac))
 
 #ifdef SERIAL_DEBUG
+
 /* You may define either or both of these. */
 #define MAC_USE_SCC_A /* Modem port */
 #define MAC_USE_SCC_B /* Printer port */
@@ -2908,9 +2907,21 @@ func_start       serial_init,%d0/%d1/%a0/%a1
 #define mac_scc_cha_b_data_offset      0x4
 #define mac_scc_cha_a_data_offset      0x6
 
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+       movel   %pc@(L(mac_sccbase)),%a0
+       /* Reset SCC device */
+       moveb   #9,%a0@(mac_scc_cha_a_ctrl_offset)
+       moveb   #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+       /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+       /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
+       movel   #35,%d0
+5:
+       subq    #1,%d0
+       jne     5b
+#endif
+
 #ifdef MAC_USE_SCC_A
        /* Initialize channel A */
-       movel   %pc@(L(mac_sccbase)),%a0
        lea     %pc@(L(scc_initable_mac)),%a1
 5:     moveb   %a1@+,%d0
        jmi     6f
@@ -2922,9 +2933,6 @@ func_start        serial_init,%d0/%d1/%a0/%a1
 
 #ifdef MAC_USE_SCC_B
        /* Initialize channel B */
-#ifndef MAC_USE_SCC_A  /* Load mac_sccbase only if needed */
-       movel   %pc@(L(mac_sccbase)),%a0
-#endif /* MAC_USE_SCC_A */
        lea     %pc@(L(scc_initable_mac)),%a1
 7:     moveb   %a1@+,%d0
        jmi     8f
@@ -2933,6 +2941,7 @@ func_start        serial_init,%d0/%d1/%a0/%a1
        jra     7b
 8:
 #endif /* MAC_USE_SCC_B */
+
 #endif /* SERIAL_DEBUG */
 
        jra     L(serial_init_done)
@@ -3006,17 +3015,17 @@ func_start      serial_putc,%d0/%d1/%a0/%a1
 
 #ifdef SERIAL_DEBUG
 
-#ifdef MAC_USE_SCC_A
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
        movel   %pc@(L(mac_sccbase)),%a1
+#endif
+
+#ifdef MAC_USE_SCC_A
 3:     btst    #2,%a1@(mac_scc_cha_a_ctrl_offset)
        jeq     3b
        moveb   %d0,%a1@(mac_scc_cha_a_data_offset)
 #endif /* MAC_USE_SCC_A */
 
 #ifdef MAC_USE_SCC_B
-#ifndef MAC_USE_SCC_A  /* Load mac_sccbase only if needed */
-       movel   %pc@(L(mac_sccbase)),%a1
-#endif /* MAC_USE_SCC_A */
 4:     btst    #2,%a1@(mac_scc_cha_b_ctrl_offset)
        jeq     4b
        moveb   %d0,%a1@(mac_scc_cha_b_data_offset)
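
The reset sequence added to serial_init relies on the timing worked out in its comment: five PCLK cycles at 3.6864 MHz come to about 1.36 us, or roughly 68 cycles of the 50 MHz CPU clock assumed there. A stand-alone check of that arithmetic; the two-cycles-per-iteration cost of the subq/jne pair is an assumption here, chosen because it lands near the #35 loop count used above:

    #include <stdio.h>

    int main(void)
    {
            double pclk_mhz = 3.6864;            /* SCC PCLK */
            double cpu_mhz  = 50.0;              /* CPU clock assumed in the comment */
            double wait_us  = 5.0 / pclk_mhz;    /* 5 PCLK cycles -> ~1.36 us */
            double cycles   = wait_us * cpu_mhz; /* -> ~68 CPU cycles */

            printf("wait %.2f us, %.0f CPU cycles, ~%d iterations of a 2-cycle loop\n",
                   wait_us, cycles, (int)(cycles / 2.0));
            return 0;
    }
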
index 0f553bc009a0a848541a3410960ea959e074bd70..ffea82a16d2cb0897c7d2d4b6a79a67cf1130f44 100644 (file)
@@ -102,21 +102,23 @@ do { \
 
 #define flush_cache_range(vma, start, len) do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
-do {                                                                   \
-       u32 addr = virt_to_phys(dst);                                   \
-       memcpy((dst), (src), (len));                                    \
-       if (vma->vm_flags & VM_EXEC) {                                  \
-               invalidate_icache_range((unsigned) (addr),              \
-                                       (unsigned) (addr) + PAGE_SIZE); \
-               flush_dcache_range((unsigned) (addr),                   \
-                                       (unsigned) (addr) + PAGE_SIZE); \
-       }                                                               \
-} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)           \
-do {                                                                   \
-       memcpy((dst), (src), (len));                                    \
-} while (0)
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       u32 addr = virt_to_phys(dst);
+       memcpy(dst, src, len);
+       if (vma->vm_flags & VM_EXEC) {
+               invalidate_icache_range(addr, addr + PAGE_SIZE);
+               flush_dcache_range(addr, addr + PAGE_SIZE);
+       }
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+}
 
 #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
index ff8cde159d9a4809abac1994537b5425c71c9cb4..01848f056f439d251f9f61dcc0806a8d47aad4ba 100644 (file)
@@ -105,7 +105,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
        __asm__ __volatile__ ("1:       lwx     %1, %3, r0;             \
                                        cmp     %2, %1, %4;             \
-                                       beqi    %2, 3f;                 \
+                                       bnei    %2, 3f;                 \
                                2:      swx     %5, %3, r0;             \
                                        addic   %2, r0, 0;              \
                                        bnei    %2, 1b;                 \
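
The beqi -> bnei change inverts the branch sense so the swx store is skipped only when the value read back differs from oldval. In plain C, the intended semantics of the load/compare/store sequence are roughly the following (a sketch only; the real code above does this atomically with lwx/swx and retries if the reservation is lost):

    static unsigned int cmpxchg_sketch(unsigned int *addr,
                                       unsigned int oldval,
                                       unsigned int newval)
    {
            unsigned int found = *addr;  /* lwx */

            if (found == oldval)         /* cmp + bnei: skip the store on mismatch */
                    *addr = newval;      /* swx */
            return found;
    }
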
index 8cb8a8566edea40d3df6aaa20a373fbd23390138..2565cb94f32f0857bb4d925527c2ee9c77c7ea89 100644 (file)
@@ -123,11 +123,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
  * inb_p/inw_p/...
  * The macros don't do byte-swapping.
  */
-#define inb(port)              readb((u8 *)((port)))
+#define inb(port)              readb((u8 *)((unsigned long)(port)))
 #define outb(val, port)                writeb((val), (u8 *)((unsigned long)(port)))
-#define inw(port)              readw((u16 *)((port)))
+#define inw(port)              readw((u16 *)((unsigned long)(port)))
 #define outw(val, port)                writew((val), (u16 *)((unsigned long)(port)))
-#define inl(port)              readl((u32 *)((port)))
+#define inl(port)              readl((u32 *)((unsigned long)(port)))
 #define outl(val, port)                writel((val), (u32 *)((unsigned long)(port)))
 
 #define inb_p(port)            inb((port))
index efe59d881789fbafd7db6bd4933263394995da1f..04e49553bdf978c534519550382a5dca8e98e6ed 100644 (file)
@@ -99,13 +99,13 @@ static inline int access_ok(int type, const void __user *addr,
        if ((get_fs().seg < ((unsigned long)addr)) ||
                        (get_fs().seg < ((unsigned long)addr + size - 1))) {
                pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
-                       type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+                       type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
                return 0;
        }
 ok:
        pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
-                       type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+                       type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
                        (u32)get_fs().seg);
        return 1;
 }
index 4254514b4c8cf8cf3819c66ad90eb4e18145c696..a6e44410672dc036ed7009126cce80e846411127 100644 (file)
@@ -140,7 +140,7 @@ do {                                                                        \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)                        \
 do {                                                                   \
-       int volatile temp;                                              \
+       int volatile temp = 0;                                          \
        int align = ~(line_length - 1);                                 \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end - start < 0);                                       \
index cb0f6afb73894ef16019b7a53861330a49d72d27..9edc35ff8cf1420e9576f195f92658e71517c073 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <prom.h>
index 38afb11ba2c4605e5c370f979eba3bc86a97621b..93fa586d52e2d480d6157cc0fcffdf962cad6534 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 
+#include <asm/idle.h>
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/mach-au1x00/au1000.h>
index a0233a2c198812980405dc05e117de444413552e..8be4e856b8b8942e903c286c2004fb46c4d0df9d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/clk.h>
 
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/time.h>          /* for mips_hpt_frequency */
 #include <asm/reboot.h>                /* for _machine_{restart,halt} */
 #include <asm/mips_machine.h>
index a9505c4867e8dd1054646643abdf1640172b0dce..9c0ddafafb6cee7116bf22638bca552c334601c4 100644 (file)
@@ -845,6 +845,10 @@ int __init board_register_devices(void)
            !bcm63xx_nvram_get_mac_address(board.enet1.mac_addr))
                bcm63xx_enet_register(1, &board.enet1);
 
+       if (board.has_enetsw &&
+           !bcm63xx_nvram_get_mac_address(board.enetsw.mac_addr))
+               bcm63xx_enetsw_register(&board.enetsw);
+
        if (board.has_usbd)
                bcm63xx_usbd_register(&board.usbd);
 
index 39c23366c5c7ea4bcbe6c272fe0279aca4d82aba..52bc01df9bfe10575d83abd2e9c78313a603cda0 100644 (file)
@@ -9,16 +9,60 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <linux/export.h>
 #include <bcm63xx_dev_enet.h>
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 
+#ifdef BCMCPU_RUNTIME_DETECT
+static const unsigned long bcm6348_regs_enetdmac[] = {
+       [ENETDMAC_CHANCFG]      = ENETDMAC_CHANCFG_REG,
+       [ENETDMAC_IR]           = ENETDMAC_IR_REG,
+       [ENETDMAC_IRMASK]       = ENETDMAC_IRMASK_REG,
+       [ENETDMAC_MAXBURST]     = ENETDMAC_MAXBURST_REG,
+};
+
+static const unsigned long bcm6345_regs_enetdmac[] = {
+       [ENETDMAC_CHANCFG]      = ENETDMA_6345_CHANCFG_REG,
+       [ENETDMAC_IR]           = ENETDMA_6345_IR_REG,
+       [ENETDMAC_IRMASK]       = ENETDMA_6345_IRMASK_REG,
+       [ENETDMAC_MAXBURST]     = ENETDMA_6345_MAXBURST_REG,
+       [ENETDMAC_BUFALLOC]     = ENETDMA_6345_BUFALLOC_REG,
+       [ENETDMAC_RSTART]       = ENETDMA_6345_RSTART_REG,
+       [ENETDMAC_FC]           = ENETDMA_6345_FC_REG,
+       [ENETDMAC_LEN]          = ENETDMA_6345_LEN_REG,
+};
+
+const unsigned long *bcm63xx_regs_enetdmac;
+EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
+
+static __init void bcm63xx_enetdmac_regs_init(void)
+{
+       if (BCMCPU_IS_6345())
+               bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
+       else
+               bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
+}
+#else
+static __init void bcm63xx_enetdmac_regs_init(void) { }
+#endif
+
 static struct resource shared_res[] = {
        {
                .start          = -1, /* filled at runtime */
                .end            = -1, /* filled at runtime */
                .flags          = IORESOURCE_MEM,
        },
+       {
+               .start          = -1, /* filled at runtime */
+               .end            = -1, /* filled at runtime */
+               .flags          = IORESOURCE_MEM,
+       },
+       {
+               .start          = -1, /* filled at runtime */
+               .end            = -1, /* filled at runtime */
+               .flags          = IORESOURCE_MEM,
+       },
 };
 
 static struct platform_device bcm63xx_enet_shared_device = {
@@ -94,6 +138,71 @@ static struct platform_device bcm63xx_enet1_device = {
        },
 };
 
+static struct resource enetsw_res[] = {
+       {
+               /* start & end filled at runtime */
+               .flags          = IORESOURCE_MEM,
+       },
+       {
+               /* start filled at runtime */
+               .flags          = IORESOURCE_IRQ,
+       },
+       {
+               /* start filled at runtime */
+               .flags          = IORESOURCE_IRQ,
+       },
+};
+
+static struct bcm63xx_enetsw_platform_data enetsw_pd;
+
+static struct platform_device bcm63xx_enetsw_device = {
+       .name           = "bcm63xx_enetsw",
+       .num_resources  = ARRAY_SIZE(enetsw_res),
+       .resource       = enetsw_res,
+       .dev            = {
+               .platform_data = &enetsw_pd,
+       },
+};
+
+static int __init register_shared(void)
+{
+       int ret, chan_count;
+
+       if (shared_device_registered)
+               return 0;
+
+       bcm63xx_enetdmac_regs_init();
+
+       shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
+       shared_res[0].end = shared_res[0].start;
+       if (BCMCPU_IS_6345())
+               shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
+       else
+               shared_res[0].end += (RSET_ENETDMA_SIZE)  - 1;
+
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
+               chan_count = 32;
+       else if (BCMCPU_IS_6345())
+               chan_count = 8;
+       else
+               chan_count = 16;
+
+       shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
+       shared_res[1].end = shared_res[1].start;
+       shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count)  - 1;
+
+       shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
+       shared_res[2].end = shared_res[2].start;
+       shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count)  - 1;
+
+       ret = platform_device_register(&bcm63xx_enet_shared_device);
+       if (ret)
+               return ret;
+       shared_device_registered = 1;
+
+       return 0;
+}
+
 int __init bcm63xx_enet_register(int unit,
                                 const struct bcm63xx_enet_platform_data *pd)
 {
@@ -104,22 +213,12 @@ int __init bcm63xx_enet_register(int unit,
        if (unit > 1)
                return -ENODEV;
 
-       if (unit == 1 && BCMCPU_IS_6338())
+       if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
                return -ENODEV;
 
-       if (!shared_device_registered) {
-               shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
-               shared_res[0].end = shared_res[0].start;
-               if (BCMCPU_IS_6338())
-                       shared_res[0].end += (RSET_ENETDMA_SIZE / 2)  - 1;
-               else
-                       shared_res[0].end += (RSET_ENETDMA_SIZE)  - 1;
-
-               ret = platform_device_register(&bcm63xx_enet_shared_device);
-               if (ret)
-                       return ret;
-               shared_device_registered = 1;
-       }
+       ret = register_shared();
+       if (ret)
+               return ret;
 
        if (unit == 0) {
                enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
@@ -155,8 +254,62 @@ int __init bcm63xx_enet_register(int unit,
                dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
        }
 
+       dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
+       dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
+       if (BCMCPU_IS_6345()) {
+               dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
+               dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
+               dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
+               dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
+               dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
+               dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
+               dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
+       } else {
+               dpd->dma_has_sram = true;
+               dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
+       }
+
        ret = platform_device_register(pdev);
        if (ret)
                return ret;
        return 0;
 }
+
+int __init
+bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
+{
+       int ret;
+
+       if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
+               return -ENODEV;
+
+       ret = register_shared();
+       if (ret)
+               return ret;
+
+       enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
+       enetsw_res[0].end = enetsw_res[0].start;
+       enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
+       enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
+       enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
+       if (!enetsw_res[2].start)
+               enetsw_res[2].start = -1;
+
+       memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));
+
+       if (BCMCPU_IS_6328())
+               enetsw_pd.num_ports = ENETSW_PORTS_6328;
+       else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
+               enetsw_pd.num_ports = ENETSW_PORTS_6368;
+
+       enetsw_pd.dma_has_sram = true;
+       enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
+       enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
+       enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
+
+       ret = platform_device_register(&bcm63xx_enetsw_device);
+       if (ret)
+               return ret;
+
+       return 0;
+}
index b0baa299f899b3c2322c0a4346cc4947dcfc2457..01b1b3f94feb77b115f6911771236961ff3e151c 100644 (file)
@@ -428,13 +428,16 @@ static void octeon_restart(char *command)
  */
 static void octeon_kill_core(void *arg)
 {
-       mb();
-       if (octeon_is_simulation()) {
-               /* The simulator needs the watchdog to stop for dead cores */
-               cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+       if (octeon_is_simulation())
                /* A break instruction causes the simulator stop a core */
-               asm volatile ("sync\nbreak");
-       }
+               asm volatile ("break" ::: "memory");
+
+       local_irq_disable();
+       /* Disable watchdog on this core. */
+       cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+       /* Spin in a low power mode. */
+       while (true)
+               asm volatile ("wait" ::: "memory");
 }
 
 
index 516b4428df4ecd3ac5b45a6397f8ffe4a2e070c1..4eedd481dd007bba3b25bfd733fca34cbe895730 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/leds.h>
 
+#include <asm/idle.h>
 #include <asm/processor.h>
 
 #include <cobalt.h>
index face9d26e6d5a1558c93cd149e60eb742fad8619..bac26b971c5e86342dc751bf946434a6ceead08f 100644 (file)
@@ -228,7 +228,6 @@ CONFIG_HIDRAW=y
 CONFIG_USB_HID=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
index 14752dde754018170930faa0aabfbd049b2a4312..e2b4ad55462f3477f6c4b5e63d54057064b92fde 100644 (file)
@@ -344,7 +344,6 @@ CONFIG_UHID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
index b6acd2f256b682f4c3ab37774eb3ba70bcee7595..343bebc4b63b981724a17b770dc71b895d4dc225 100644 (file)
@@ -300,7 +300,6 @@ CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_OTG_WHITELIST=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
index c9456e7a7283dfa15b507f021bb3f8f6696a45ca..778e32d817bc7da48056167edb4e137547ca9ef4 100644 (file)
@@ -6,8 +6,6 @@
 #include <linux/seq_file.h>
 #include <linux/clk.h>
 
-extern void (*cpu_wait) (void);
-
 struct clk;
 
 struct clk_ops {
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
new file mode 100644 (file)
index 0000000..d192158
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern void (*cpu_wait)(void);
+extern void r4k_wait(void);
+extern asmlinkage void __r4k_wait(void);
+extern void r4k_wait_irqoff(void);
+extern void __pastwait(void);
+
+static inline int using_rollback_handler(void)
+{
+       return cpu_wait == r4k_wait;
+}
+
+static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
+{
+       return addr >= (unsigned long)r4k_wait_irqoff &&
+              addr < (unsigned long)__pastwait;
+}
+
+#endif /* __ASM_IDLE_H  */
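
A sketch of the kind of caller these helpers anticipate; the function below and its pr_debug() message are illustrative only (the real consumers are the MIPS idle and interrupt paths, which are not part of this hunk):

    #include <linux/printk.h>
    #include <asm/idle.h>

    /* hypothetical: called with the program counter saved when an interrupt hit */
    static void note_idle_interrupt(unsigned long epc)
    {
            /* true when epc lies in [r4k_wait_irqoff, __pastwait) */
            if (address_is_in_r4k_wait_irqoff(epc))
                    pr_debug("interrupt taken inside r4k_wait_irqoff() at %lx\n", epc);
    }
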
index 1be13727323f7f2baf0395ee3360de5a78fcfaae..b7e59853fd33b05df930c3fb795aa03a9a6de23e 100644 (file)
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
  */
 static inline unsigned long virt_to_phys(volatile const void *address)
 {
-       return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
+       return __pa(address);
 }
 
 /*
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
deleted file mode 100644 (file)
index 85789ea..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#ifndef __LINUX_KVM_MIPS_H
-#define __LINUX_KVM_MIPS_H
-
-#include <linux/types.h>
-
-#define __KVM_MIPS
-
-#define N_MIPS_COPROC_REGS      32
-#define N_MIPS_COPROC_SEL      8
-
-/* for KVM_GET_REGS and KVM_SET_REGS */
-struct kvm_regs {
-       __u32 gprs[32];
-       __u32 hi;
-       __u32 lo;
-       __u32 pc;
-
-       __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
-};
-
-/* for KVM_GET_SREGS and KVM_SET_SREGS */
-struct kvm_sregs {
-};
-
-/* for KVM_GET_FPU and KVM_SET_FPU */
-struct kvm_fpu {
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-struct kvm_mips_interrupt {
-       /* in */
-       __u32 cpu;
-       __u32 irq;
-};
-
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
-#endif /* __LINUX_KVM_MIPS_H */
index e68781e183873b8ef40ae98157a4e6b260dd3cf9..4d6fa0bf1305d7376c7ae2ce2201fc412a8791de 100644 (file)
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK           0xffffe000
 #define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
 
 struct kvm_mips_tlb {
@@ -496,10 +496,6 @@ struct kvm_mips_callbacks {
                            uint32_t cause);
        int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
                          uint32_t cause);
-       int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
-                                   struct kvm_regs *regs);
-       int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
-                                   struct kvm_regs *regs);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
index 336228990808e5c4aed717f21682ebd1c85045bc..e6e65dc7d50232d979348964d71a51a7b5887b8f 100644 (file)
@@ -173,7 +173,10 @@ enum bcm63xx_regs_set {
 #define BCM_6358_RSET_SPI_SIZE         1804
 #define BCM_6368_RSET_SPI_SIZE         1804
 #define RSET_ENET_SIZE                 2048
-#define RSET_ENETDMA_SIZE              2048
+#define RSET_ENETDMA_SIZE              256
+#define RSET_6345_ENETDMA_SIZE         64
+#define RSET_ENETDMAC_SIZE(chans)      (16 * (chans))
+#define RSET_ENETDMAS_SIZE(chans)      (16 * (chans))
 #define RSET_ENETSW_SIZE               65536
 #define RSET_UART_SIZE                 24
 #define RSET_UDC_SIZE                  256
@@ -298,7 +301,7 @@ enum bcm63xx_regs_set {
 #define BCM_6345_USBDMA_BASE           (0xfffe2800)
 #define BCM_6345_ENET0_BASE            (0xfffe1800)
 #define BCM_6345_ENETDMA_BASE          (0xfffe2800)
-#define BCM_6345_ENETDMAC_BASE         (0xfffe2900)
+#define BCM_6345_ENETDMAC_BASE         (0xfffe2840)
 #define BCM_6345_ENETDMAS_BASE         (0xfffe2a00)
 #define BCM_6345_ENETSW_BASE           (0xdeadbeef)
 #define BCM_6345_PCMCIA_BASE           (0xfffe2028)
index d53f611184b93429d96a3037a4cab42664e2c6bc..753953e862423fa00df88eb0c612e7b5840c54be 100644 (file)
@@ -4,6 +4,8 @@
 #include <linux/if_ether.h>
 #include <linux/init.h>
 
+#include <bcm63xx_regs.h>
+
 /*
  * on board ethernet platform data
  */
@@ -37,9 +39,129 @@ struct bcm63xx_enet_platform_data {
                                          int phy_id, int reg),
                          void (*mii_write)(struct net_device *dev,
                                            int phy_id, int reg, int val));
+
+       /* DMA channel enable mask */
+       u32 dma_chan_en_mask;
+
+       /* DMA channel interrupt mask */
+       u32 dma_chan_int_mask;
+
+       /* DMA engine has internal SRAM */
+       bool dma_has_sram;
+
+       /* DMA channel register width */
+       unsigned int dma_chan_width;
+
+       /* DMA descriptor shift */
+       unsigned int dma_desc_shift;
+};
+
+/*
+ * on board ethernet switch platform data
+ */
+#define ENETSW_MAX_PORT        8
+#define ENETSW_PORTS_6328 5 /* 4 FE PHY + 1 RGMII */
+#define ENETSW_PORTS_6368 6 /* 4 FE PHY + 2 RGMII */
+
+#define ENETSW_RGMII_PORT0     4
+
+struct bcm63xx_enetsw_port {
+       int             used;
+       int             phy_id;
+
+       int             bypass_link;
+       int             force_speed;
+       int             force_duplex_full;
+
+       const char      *name;
+};
+
+struct bcm63xx_enetsw_platform_data {
+       char mac_addr[ETH_ALEN];
+       int num_ports;
+       struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
+
+       /* DMA channel enable mask */
+       u32 dma_chan_en_mask;
+
+       /* DMA channel interrupt mask */
+       u32 dma_chan_int_mask;
+
+       /* DMA channel register width */
+       unsigned int dma_chan_width;
+
+       /* DMA engine has internal SRAM */
+       bool dma_has_sram;
 };
 
 int __init bcm63xx_enet_register(int unit,
                                 const struct bcm63xx_enet_platform_data *pd);
 
+int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd);
+
+enum bcm63xx_regs_enetdmac {
+       ENETDMAC_CHANCFG,
+       ENETDMAC_IR,
+       ENETDMAC_IRMASK,
+       ENETDMAC_MAXBURST,
+       ENETDMAC_BUFALLOC,
+       ENETDMAC_RSTART,
+       ENETDMAC_FC,
+       ENETDMAC_LEN,
+};
+
+static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg)
+{
+#ifdef BCMCPU_RUNTIME_DETECT
+       extern const unsigned long *bcm63xx_regs_enetdmac;
+
+       return bcm63xx_regs_enetdmac[reg];
+#else
+#ifdef CONFIG_BCM63XX_CPU_6345
+       switch (reg) {
+       case ENETDMAC_CHANCFG:
+               return ENETDMA_6345_CHANCFG_REG;
+       case ENETDMAC_IR:
+               return ENETDMA_6345_IR_REG;
+       case ENETDMAC_IRMASK:
+               return ENETDMA_6345_IRMASK_REG;
+       case ENETDMAC_MAXBURST:
+               return ENETDMA_6345_MAXBURST_REG;
+       case ENETDMAC_BUFALLOC:
+               return ENETDMA_6345_BUFALLOC_REG;
+       case ENETDMAC_RSTART:
+               return ENETDMA_6345_RSTART_REG;
+       case ENETDMAC_FC:
+               return ENETDMA_6345_FC_REG;
+       case ENETDMAC_LEN:
+               return ENETDMA_6345_LEN_REG;
+       }
+#endif
+#if defined(CONFIG_BCM63XX_CPU_6328) || \
+       defined(CONFIG_BCM63XX_CPU_6338) || \
+       defined(CONFIG_BCM63XX_CPU_6348) || \
+       defined(CONFIG_BCM63XX_CPU_6358) || \
+       defined(CONFIG_BCM63XX_CPU_6362) || \
+       defined(CONFIG_BCM63XX_CPU_6368)
+       switch (reg) {
+       case ENETDMAC_CHANCFG:
+               return ENETDMAC_CHANCFG_REG;
+       case ENETDMAC_IR:
+               return ENETDMAC_IR_REG;
+       case ENETDMAC_IRMASK:
+               return ENETDMAC_IRMASK_REG;
+       case ENETDMAC_MAXBURST:
+               return ENETDMAC_MAXBURST_REG;
+       case ENETDMAC_BUFALLOC:
+       case ENETDMAC_RSTART:
+       case ENETDMAC_FC:
+       case ENETDMAC_LEN:
+               return 0;
+       }
+#endif
+#endif
+       return 0;
+}
+
+
 #endif /* ! BCM63XX_DEV_ENET_H_ */
index 3203fe49b34d4d55e6808c0c50f4d55a4f9f91b0..eff7ca7d12b09f3de2c4810fc748cbc23ccc7807 100644 (file)
 /*************************************************************************
  * _REG relative to RSET_ENETDMA
  *************************************************************************/
+#define ENETDMA_CHAN_WIDTH             0x10
+#define ENETDMA_6345_CHAN_WIDTH                0x40
 
 /* Controller Configuration Register */
 #define ENETDMA_CFG_REG                        (0x0)
 /* State Ram Word 4 */
 #define ENETDMA_SRAM4_REG(x)           (0x20c + (x) * 0x10)
 
+/* Broadcom 6345 ENET DMA definitions */
+#define ENETDMA_6345_CHANCFG_REG       (0x00)
+
+#define ENETDMA_6345_MAXBURST_REG      (0x40)
+
+#define ENETDMA_6345_RSTART_REG                (0x08)
+
+#define ENETDMA_6345_LEN_REG           (0x0C)
+
+#define ENETDMA_6345_IR_REG            (0x14)
+
+#define ENETDMA_6345_IRMASK_REG                (0x18)
+
+#define ENETDMA_6345_FC_REG            (0x1C)
+
+#define ENETDMA_6345_BUFALLOC_REG      (0x20)
+
+/* Shift down for EOP, SOP and WRAP bits */
+#define ENETDMA_6345_DESC_SHIFT                (3)
 
 /*************************************************************************
  * _REG relative to RSET_ENETDMAC
  *************************************************************************/
 
 /* Channel Configuration register */
-#define ENETDMAC_CHANCFG_REG(x)                ((x) * 0x10)
+#define ENETDMAC_CHANCFG_REG           (0x0)
 #define ENETDMAC_CHANCFG_EN_SHIFT      0
 #define ENETDMAC_CHANCFG_EN_MASK       (1 << ENETDMAC_CHANCFG_EN_SHIFT)
 #define ENETDMAC_CHANCFG_PKTHALT_SHIFT 1
 #define ENETDMAC_CHANCFG_PKTHALT_MASK  (1 << ENETDMAC_CHANCFG_PKTHALT_SHIFT)
 #define ENETDMAC_CHANCFG_BUFHALT_SHIFT 2
 #define ENETDMAC_CHANCFG_BUFHALT_MASK  (1 << ENETDMAC_CHANCFG_BUFHALT_SHIFT)
+#define ENETDMAC_CHANCFG_CHAINING_SHIFT        2
+#define ENETDMAC_CHANCFG_CHAINING_MASK (1 << ENETDMAC_CHANCFG_CHAINING_SHIFT)
+#define ENETDMAC_CHANCFG_WRAP_EN_SHIFT 3
+#define ENETDMAC_CHANCFG_WRAP_EN_MASK  (1 << ENETDMAC_CHANCFG_WRAP_EN_SHIFT)
+#define ENETDMAC_CHANCFG_FLOWC_EN_SHIFT        4
+#define ENETDMAC_CHANCFG_FLOWC_EN_MASK (1 << ENETDMAC_CHANCFG_FLOWC_EN_SHIFT)
 
 /* Interrupt Control/Status register */
-#define ENETDMAC_IR_REG(x)             (0x4 + (x) * 0x10)
+#define ENETDMAC_IR_REG                        (0x4)
 #define ENETDMAC_IR_BUFDONE_MASK       (1 << 0)
 #define ENETDMAC_IR_PKTDONE_MASK       (1 << 1)
 #define ENETDMAC_IR_NOTOWNER_MASK      (1 << 2)
 
 /* Interrupt Mask register */
-#define ENETDMAC_IRMASK_REG(x)         (0x8 + (x) * 0x10)
+#define ENETDMAC_IRMASK_REG            (0x8)
 
 /* Maximum Burst Length */
-#define ENETDMAC_MAXBURST_REG(x)       (0xc + (x) * 0x10)
+#define ENETDMAC_MAXBURST_REG          (0xc)
 
 
 /*************************************************************************
  *************************************************************************/
 
 /* Ring Start Address register */
-#define ENETDMAS_RSTART_REG(x)         ((x) * 0x10)
+#define ENETDMAS_RSTART_REG            (0x0)
 
 /* State Ram Word 2 */
-#define ENETDMAS_SRAM2_REG(x)          (0x4 + (x) * 0x10)
+#define ENETDMAS_SRAM2_REG             (0x4)
 
 /* State Ram Word 3 */
-#define ENETDMAS_SRAM3_REG(x)          (0x8 + (x) * 0x10)
+#define ENETDMAS_SRAM3_REG             (0x8)
 
 /* State Ram Word 4 */
-#define ENETDMAS_SRAM4_REG(x)          (0xc + (x) * 0x10)
+#define ENETDMAS_SRAM4_REG             (0xc)
 
 
 /*************************************************************************
  * _REG relative to RSET_ENETSW
  *************************************************************************/
 
+/* Port traffic control */
+#define ENETSW_PTCTRL_REG(x)           (0x0 + (x))
+#define ENETSW_PTCTRL_RXDIS_MASK       (1 << 0)
+#define ENETSW_PTCTRL_TXDIS_MASK       (1 << 1)
+
+/* Switch mode register */
+#define ENETSW_SWMODE_REG              (0xb)
+#define ENETSW_SWMODE_FWD_EN_MASK      (1 << 1)
+
+/* IMP override Register */
+#define ENETSW_IMPOV_REG               (0xe)
+#define ENETSW_IMPOV_FORCE_MASK                (1 << 7)
+#define ENETSW_IMPOV_TXFLOW_MASK       (1 << 5)
+#define ENETSW_IMPOV_RXFLOW_MASK       (1 << 4)
+#define ENETSW_IMPOV_1000_MASK         (1 << 3)
+#define ENETSW_IMPOV_100_MASK          (1 << 2)
+#define ENETSW_IMPOV_FDX_MASK          (1 << 1)
+#define ENETSW_IMPOV_LINKUP_MASK       (1 << 0)
+
+/* Port override Register */
+#define ENETSW_PORTOV_REG(x)           (0x58 + (x))
+#define ENETSW_PORTOV_ENABLE_MASK      (1 << 6)
+#define ENETSW_PORTOV_TXFLOW_MASK      (1 << 5)
+#define ENETSW_PORTOV_RXFLOW_MASK      (1 << 4)
+#define ENETSW_PORTOV_1000_MASK                (1 << 3)
+#define ENETSW_PORTOV_100_MASK         (1 << 2)
+#define ENETSW_PORTOV_FDX_MASK         (1 << 1)
+#define ENETSW_PORTOV_LINKUP_MASK      (1 << 0)
+
+/* MDIO control register */
+#define ENETSW_MDIOC_REG               (0xb0)
+#define ENETSW_MDIOC_EXT_MASK          (1 << 16)
+#define ENETSW_MDIOC_REG_SHIFT         20
+#define ENETSW_MDIOC_PHYID_SHIFT       25
+#define ENETSW_MDIOC_RD_MASK           (1 << 30)
+#define ENETSW_MDIOC_WR_MASK           (1 << 31)
+
+/* MDIO data register */
+#define ENETSW_MDIOD_REG               (0xb4)
+
+/* Global Management Configuration Register */
+#define ENETSW_GMCR_REG                        (0x200)
+#define ENETSW_GMCR_RST_MIB_MASK       (1 << 0)
+
 /* MIB register */
 #define ENETSW_MIB_REG(x)              (0x2800 + (x) * 4)
 #define ENETSW_MIB_REG_COUNT           47
 
+/* Jumbo control register port mask register */
+#define ENETSW_JMBCTL_PORT_REG         (0x4004)
+
+/* Jumbo control mib good frame register */
+#define ENETSW_JMBCTL_MAXSIZE_REG      (0x4008)
+
 
 /*************************************************************************
  * _REG relative to RSET_OHCI_PRIV
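
The new ENETSW_MDIOC_* fields describe the switch's indirect MDIO interface. A sketch of how a read command word might be composed from them before being written to ENETSW_MDIOC_REG; whether a given access also needs ENETSW_MDIOC_EXT_MASK depends on the register being addressed and is not claimed here:

    /* sketch: build an MDIO read command word from the fields above */
    static unsigned int enetsw_mdio_read_cmd(int phy_id, int location)
    {
            return ENETSW_MDIOC_RD_MASK |
                   (phy_id   << ENETSW_MDIOC_PHYID_SHIFT) |
                   (location << ENETSW_MDIOC_REG_SHIFT);
    }
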
index 682bcf3b492ae65cdfa19fd27c6fbb62552dc185..d9aee1a833f39de312e8a23571bcc9cad976000b 100644 (file)
@@ -24,6 +24,7 @@ struct board_info {
        /* enabled feature/device */
        unsigned int    has_enet0:1;
        unsigned int    has_enet1:1;
+       unsigned int    has_enetsw:1;
        unsigned int    has_pci:1;
        unsigned int    has_pccard:1;
        unsigned int    has_ohci0:1;
@@ -36,6 +37,7 @@ struct board_info {
        /* ethernet config */
        struct bcm63xx_enet_platform_data enet0;
        struct bcm63xx_enet_platform_data enet1;
+       struct bcm63xx_enetsw_platform_data enetsw;
 
        /* USB config */
        struct bcm63xx_usbd_platform_data usbd;
index 1554721e4808e7ffc67d61bc87646cbdd5a325be..516e6e9a55940ec8abea80160278a744dc220c88 100644 (file)
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC(asid)                                         \
-({                                                             \
-       unsigned long __asid = asid;                            \
-       __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"          \
-       ".section\t__asid_inc,\"a\"\n\t"                        \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid)                                          \
-       :"0" (__asid));                                         \
-       __asid;                                                 \
-})
-#define ASID_MASK(asid)                                                \
-({                                                             \
-       unsigned long __asid = asid;                            \
-       __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"      \
-       ".section\t__asid_mask,\"a\"\n\t"                       \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid)                                          \
-       :"r" (__asid));                                         \
-       __asid;                                                 \
-})
-#define ASID_VERSION_MASK                                      \
-({                                                             \
-       unsigned long __asid;                                   \
-       __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"  \
-       ".section\t__asid_version_mask,\"a\"\n\t"               \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid));                                        \
-       __asid;                                                 \
-})
-#define ASID_FIRST_VERSION                                     \
-({                                                             \
-       unsigned long __asid = asid;                            \
-       __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"         \
-       ".section\t__asid_first_version,\"a\"\n\t"              \
-       ".word\t1b\n\t"                                         \
-       ".previous"                                             \
-       :"=r" (__asid));                                        \
-       __asid;                                                 \
-})
-
-#define ASID_FIRST_VERSION_R3000       0x1000
-#define ASID_FIRST_VERSION_R4000       0x100
-#define ASID_FIRST_VERSION_R8000       0x1000
-#define ASID_FIRST_VERSION_RM9000      0x1000
+#define ASID_INC       0x40
+#define ASID_MASK      0xfc0
+
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC       0x10
+#define ASID_MASK      0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC       0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK      (smtc_asid_mask)
+#define HW_ASID_MASK   0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC       0x1
+#define ASID_MASK      0xff
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK              0xff
-extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)      ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm)      (cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)                (cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
+/*
+ *  All unused by hardware upper bits will be considered
+ *  as a software asid extension.
+ */
+#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -137,10 +114,10 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
        extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
-       if (!ASID_MASK((asid = ASID_INC(asid)))) {
+       if (! ((asid += ASID_INC) & ASID_MASK) ) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
-#ifdef CONFIG_VIRTUALIZATION
+#ifdef CONFIG_KVM
                kvm_local_flush_tlb_all();      /* start new asid cycle */
 #else
                local_flush_tlb_all();  /* start new asid cycle */
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * free up the ASID value for use and flush any old
         * instances of it from the TLB.
         */
-       oldasid = ASID_MASK(read_c0_entryhi());
+       oldasid = (read_c0_entryhi() & ASID_MASK);
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * having ASID_MASK smaller than the hardware maximum,
         * make sure no "soft" bits become "hard"...
         */
-       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
        /* See comments for similar code above */
        mtflags = dvpe();
-       oldasid = ASID_MASK(read_c0_entryhi());
+       oldasid = read_c0_entryhi() & ASID_MASK;
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
                         smtc_flush_tlb_asid(oldasid);
        }
        /* See comments for similar code above */
-       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
-                        cpu_asid(cpu, next));
+       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+                        cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
 #else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
                /* See comments for similar code above */
                prevvpe = dvpe();
-               oldasid = ASID_MASK(read_c0_entryhi());
+               oldasid = (read_c0_entryhi() & ASID_MASK);
                if (smtc_live_asid[mytlb][oldasid]) {
                        smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                        if(smtc_live_asid[mytlb][oldasid] == 0)
                                smtc_flush_tlb_asid(oldasid);
                }
                /* See comments for similar code above */
-               write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+               write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
                                | cpu_asid(cpu, mm));
                ehb(); /* Make sure it propagates to TCStatus */
                evpe(prevvpe);
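
The hunk above replaces the run-time patched ASID macros with plain per-CPU constants plus a derived
version split. A minimal user-space sketch of that arithmetic, assuming the default ASID_INC/ASID_MASK
pair from this hunk (0x1/0xff); illustration only, not kernel code:

    #include <stdio.h>

    #define ASID_INC            0x1UL
    #define ASID_MASK           0xffUL
    #define ASID_VERSION_MASK   ((unsigned long)~(ASID_MASK | (ASID_MASK - 1)))
    #define ASID_FIRST_VERSION  ((unsigned long)(~ASID_VERSION_MASK) + 1)

    int main(void)
    {
            unsigned long asid = ASID_FIRST_VERSION | ASID_MASK; /* last ASID of a cycle */

            /* Same test as get_new_mmu_context(): did the hardware field wrap? */
            if (!((asid += ASID_INC) & ASID_MASK))
                    printf("wrap -> flush TLB, new version base %#lx\n", asid);

            printf("hardware asid %#lx, version bits %#lx\n",
                   asid & ASID_MASK, asid & ASID_VERSION_MASK);
            return 0;
    }
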
index eab99e536b5c9137e5cd1f117fa57ca1318c6c54..f59552fae9173264ab58ffd3fbe95e080ec5db93 100644 (file)
@@ -46,7 +46,6 @@
 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #include <linux/pfn.h>
-#include <asm/io.h>
 
 extern void build_clear_page(void);
 extern void build_copy_page(void);
@@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+#include <asm/io.h>
 
 /*
  * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
@@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #ifdef CONFIG_FLATMEM
 
-#define pfn_valid(pfn)                                                 \
-({                                                                     \
-       unsigned long __pfn = (pfn);                                    \
-       /* avoid <linux/bootmem.h> include hell */                      \
-       extern unsigned long min_low_pfn;                               \
-                                                                       \
-       __pfn >= min_low_pfn && __pfn < max_mapnr;                      \
-})
+static inline int pfn_valid(unsigned long pfn)
+{
+       /* avoid <linux/mm.h> include hell */
+       extern unsigned long max_mapnr;
+
+       return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+}
 
 #elif defined(CONFIG_SPARSEMEM)
 
index 71686c897deaa2651bbb5808ae4192f57ea1f54f..1470b7b68b0e98996d271b25b86584fb6f3261a5 100644 (file)
@@ -28,7 +28,6 @@
 /*
  * System setup and hardware flags..
  */
-extern void (*cpu_wait)(void);
 
 extern unsigned int vced_count, vcei_count;
 
index a3186f2bb8a09aab56fcf5324c2bf7209b2c92c4..5e6cd0947393295ea87cba5191c5a6fbf728c527 100644 (file)
 #include <asm/isadep.h>
 #include <uapi/asm/ptrace.h>
 
+/*
+ * This struct defines the way the registers are stored on the stack during a
+ * system call/exception. As usual the registers k0/k1 aren't being saved.
+ */
+struct pt_regs {
+#ifdef CONFIG_32BIT
+       /* Pad bytes for argument save space on the stack. */
+       unsigned long pad0[6];
+#endif
+
+       /* Saved main processor registers. */
+       unsigned long regs[32];
+
+       /* Saved special registers. */
+       unsigned long cp0_status;
+       unsigned long hi;
+       unsigned long lo;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+       unsigned long acx;
+#endif
+       unsigned long cp0_badvaddr;
+       unsigned long cp0_cause;
+       unsigned long cp0_epc;
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long cp0_tcstatus;
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+       unsigned long long mpl[3];        /* MTM{0,1,2} */
+       unsigned long long mtp[3];        /* MTP{0,1,2} */
+#endif
+} __aligned(8);
+
 struct task_struct;
 
 extern int ptrace_getregs(struct task_struct *child, __s64 __user *data);
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..f09ff5a
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32-bits of the struct kvm_regs fields and sign
+ * extended to 64-bits.
+ */
+struct kvm_regs {
+       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+       __u64 gpr[32];
+       __u64 hi;
+       __u64 lo;
+       __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ *
+ * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
+ * are zero filled.
+ */
+struct kvm_fpu {
+       __u64 fpr[32];
+       __u32 fir;
+       __u32 fccr;
+       __u32 fexr;
+       __u32 fenr;
+       __u32 fcsr;
+       __u32 pad;
+};
+
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[2..0]   - Register 'sel' index.
+ *  bits[7..3]   - Register 'rd'  index.
+ *  bits[15..8]  - Must be zero.
+ *  bits[31..16] - 1 -> CP0 registers.
+ *  bits[51..32] - Must be zero.
+ *  bits[63..52] - As per linux/kvm.h
+ *
+ * Other register sets may be added in the future.  Each set would
+ * have its own identifier in bits[31..16].
+ *
+ * The registers defined in struct kvm_regs are also accessible; the
+ * id values for these are below.
+ */
+
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
+struct kvm_debug_exit_arch {
+       __u64 epc;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_mips_interrupt {
+       /* in */
+       __u32 cpu;
+       __u32 irq;
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
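
As an aside, the id layout documented in the new header can be assembled with a small helper. The
sketch below mirrors the MIPS_CP0_32() macro that appears later in this diff; it assumes KVM_REG_MIPS
and KVM_REG_SIZE_U32 are available from <linux/kvm.h> and is not itself part of the patch:

    #include <linux/kvm.h>
    #include <stdio.h>

    /* bits[2..0] = sel, bits[7..3] = rd, bits[31..16] = 1 for CP0 registers */
    #define MIPS_CP0_32(rd, sel) \
            (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (rd) + (sel)))

    int main(void)
    {
            /* CP0 Status is register 12, select 0 */
            unsigned long long id = MIPS_CP0_32(12, 0);

            printf("CP0_STATUS one-reg id = %#llx\n", id);
            return 0;
    }
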
index 4d58d8468705940a77764f2615946a7c89485807..b26f7e3172790c14d60469944e607c092c0595f2 100644 (file)
 #define DSP_CONTROL    77
 #define ACX            78
 
+#ifndef __KERNEL__
 /*
  * This struct defines the way the registers are stored on the stack during a
  * system call/exception. As usual the registers k0/k1 aren't being saved.
  */
 struct pt_regs {
-#ifdef CONFIG_32BIT
-       /* Pad bytes for argument save space on the stack. */
-       unsigned long pad0[6];
-#endif
-
        /* Saved main processor registers. */
        unsigned long regs[32];
 
@@ -39,20 +35,11 @@ struct pt_regs {
        unsigned long cp0_status;
        unsigned long hi;
        unsigned long lo;
-#ifdef CONFIG_CPU_HAS_SMARTMIPS
-       unsigned long acx;
-#endif
        unsigned long cp0_badvaddr;
        unsigned long cp0_cause;
        unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
-       unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-       unsigned long long mpl[3];        /* MTM{0,1,2} */
-       unsigned long long mtp[3];        /* MTP{0,1,2} */
-#endif
 } __attribute__ ((aligned (8)));
+#endif /* __KERNEL__ */
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
 #define PTRACE_GETREGS         12
index 3b211507be7f56ffa4c125ce221a52d352f90a46..6a07992ba6c6b0e6f9b577e7693a8dfebaed7a8c 100644 (file)
@@ -92,4 +92,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _UAPI_ASM_SOCKET_H */
index 16338b84fa79d337c9d0f32c64f37de9a23a43e3..1dee279f96659c6ae2d2ac6d42af77483454076d 100644 (file)
 #define __NR_process_vm_writev         (__NR_Linux + 305)
 #define __NR_kcmp                      (__NR_Linux + 306)
 #define __NR_finit_module              (__NR_Linux + 307)
+#define __NR_getdents64                        (__NR_Linux + 308)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            307
+#define __NR_Linux_syscalls            308
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         307
+#define __NR_64_Linux_syscalls         308
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
index 6ad9e04bdf6210a8b722e92aca5e49161cb4deca..423d871a946ba15ae5b5ea70338530949fa8d166 100644 (file)
@@ -4,7 +4,7 @@
 
 extra-y                := head.o vmlinux.lds
 
-obj-y          += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
+obj-y          += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
                   prom.o ptrace.o reset.o setup.o signal.o syscall.o \
                   time.o topology.o traps.o unaligned.o watch.o vdso.o
 
index e06f777e9c493167d81b9b31acb8bb6a38803a48..1188e00bb120a2637c53f3916f28b1f990a9515e 100644 (file)
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+       unsigned long jiffies = cputime_to_jiffies(cputime);
+
+       value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+       value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"
index 97c5a1668e5347bb4a7668882931349cfdac08a6..202e581e609653ea3f2b651cb2387ba9e04fe65e 100644 (file)
@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+       unsigned long jiffies = cputime_to_jiffies(cputime);
+
+       value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+       value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"
index 4bbffdb9024ffb9cf437adb4fd5243e75e68a88c..c6568bf4b1b05559b43bc18f65e33c84a85963ba 100644 (file)
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void);
-EXPORT_SYMBOL(cpu_wait);
-
-static void r3081_wait(void)
-{
-       unsigned long cfg = read_c0_conf();
-       write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
-       local_irq_disable();
-       if (!need_resched())
-               write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
-       local_irq_enable();
-}
-
-extern void r4k_wait(void);
-
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically.  Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-void r4k_wait_irqoff(void)
-{
-       local_irq_disable();
-       if (!need_resched())
-               __asm__("       .set    push            \n"
-                       "       .set    mips3           \n"
-                       "       wait                    \n"
-                       "       .set    pop             \n");
-       local_irq_enable();
-       __asm__("       .globl __pastwait       \n"
-               "__pastwait:                    \n");
-}
-
-/*
- * The RM7000 variant has to handle erratum 38.         The workaround is to not
- * have any pending stores when the WAIT instruction is executed.
- */
-static void rm7k_wait_irqoff(void)
-{
-       local_irq_disable();
-       if (!need_resched())
-               __asm__(
-               "       .set    push                                    \n"
-               "       .set    mips3                                   \n"
-               "       .set    noat                                    \n"
-               "       mfc0    $1, $12                                 \n"
-               "       sync                                            \n"
-               "       mtc0    $1, $12         # stalls until W stage  \n"
-               "       wait                                            \n"
-               "       mtc0    $1, $12         # stalls until W stage  \n"
-               "       .set    pop                                     \n");
-       local_irq_enable();
-}
-
-/*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
- */
-static void au1k_wait(void)
-{
-       __asm__("       .set    mips3                   \n"
-               "       cache   0x14, 0(%0)             \n"
-               "       cache   0x14, 32(%0)            \n"
-               "       sync                            \n"
-               "       nop                             \n"
-               "       wait                            \n"
-               "       nop                             \n"
-               "       nop                             \n"
-               "       nop                             \n"
-               "       nop                             \n"
-               "       .set    mips0                   \n"
-               : : "r" (au1k_wait));
-}
-
-static int __initdata nowait;
-
-static int __init wait_disable(char *s)
-{
-       nowait = 1;
-
-       return 1;
-}
-
-__setup("nowait", wait_disable);
-
 static int __cpuinitdata mips_fpu_disabled;
 
 static int __init fpu_disable(char *s)
@@ -150,105 +51,6 @@ static int __init dsp_disable(char *s)
 
 __setup("nodsp", dsp_disable);
 
-void __init check_wait(void)
-{
-       struct cpuinfo_mips *c = &current_cpu_data;
-
-       if (nowait) {
-               printk("Wait instruction disabled.\n");
-               return;
-       }
-
-       switch (c->cputype) {
-       case CPU_R3081:
-       case CPU_R3081E:
-               cpu_wait = r3081_wait;
-               break;
-       case CPU_TX3927:
-               cpu_wait = r39xx_wait;
-               break;
-       case CPU_R4200:
-/*     case CPU_R4300: */
-       case CPU_R4600:
-       case CPU_R4640:
-       case CPU_R4650:
-       case CPU_R4700:
-       case CPU_R5000:
-       case CPU_R5500:
-       case CPU_NEVADA:
-       case CPU_4KC:
-       case CPU_4KEC:
-       case CPU_4KSC:
-       case CPU_5KC:
-       case CPU_25KF:
-       case CPU_PR4450:
-       case CPU_BMIPS3300:
-       case CPU_BMIPS4350:
-       case CPU_BMIPS4380:
-       case CPU_BMIPS5000:
-       case CPU_CAVIUM_OCTEON:
-       case CPU_CAVIUM_OCTEON_PLUS:
-       case CPU_CAVIUM_OCTEON2:
-       case CPU_JZRISC:
-       case CPU_LOONGSON1:
-       case CPU_XLR:
-       case CPU_XLP:
-               cpu_wait = r4k_wait;
-               break;
-
-       case CPU_RM7000:
-               cpu_wait = rm7k_wait_irqoff;
-               break;
-
-       case CPU_M14KC:
-       case CPU_M14KEC:
-       case CPU_24K:
-       case CPU_34K:
-       case CPU_1004K:
-               cpu_wait = r4k_wait;
-               if (read_c0_config7() & MIPS_CONF7_WII)
-                       cpu_wait = r4k_wait_irqoff;
-               break;
-
-       case CPU_74K:
-               cpu_wait = r4k_wait;
-               if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
-                       cpu_wait = r4k_wait_irqoff;
-               break;
-
-       case CPU_TX49XX:
-               cpu_wait = r4k_wait_irqoff;
-               break;
-       case CPU_ALCHEMY:
-               cpu_wait = au1k_wait;
-               break;
-       case CPU_20KC:
-               /*
-                * WAIT on Rev1.0 has E1, E2, E3 and E16.
-                * WAIT on Rev2.0 and Rev3.0 has E16.
-                * Rev3.1 WAIT is nop, why bother
-                */
-               if ((c->processor_id & 0xff) <= 0x64)
-                       break;
-
-               /*
-                * Another rev is incremeting c0_count at a reduced clock
-                * rate while in WAIT mode.  So we basically have the choice
-                * between using the cp0 timer as clocksource or avoiding
-                * the WAIT instruction.  Until more details are known,
-                * disable the use of WAIT for 20Kc entirely.
-                  cpu_wait = r4k_wait;
-                */
-               break;
-       case CPU_RM9000:
-               if ((c->processor_id & 0x00ff) >= 0x40)
-                       cpu_wait = r4k_wait;
-               break;
-       default:
-               break;
-       }
-}
-
 static inline void check_errata(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
index 35bed0d2342c9a779ef5d0c12a3a8080938484f7..3be9e7bb30ff05dbfa1c4866d3decba0a69a8715 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
+#include <linux/slab.h>
 
 static int __init parse_savemaxmem(char *p)
 {
index cf5509f13dd57bc832e1db50d65f88ccda6e81c7..dba90ec0dc385ffcad5cc09eda51031d4e9a0fcc 100644 (file)
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 /* Arch override because MIPS doesn't need to run this from stop_machine() */
 void arch_ftrace_update_code(int command)
 {
        ftrace_modify_all_code(command);
 }
 
+#endif
+
 /*
  * Check if the address is in kernel space
  *
index 5c2ba9f08a80d33ed0cdf61ffaf429524c3ffd4f..31fa856829cbf2620521317e5247d42b9e3fb087 100644 (file)
@@ -122,7 +122,7 @@ handle_vcei:
        __FINIT
 
        .align  5       /* 32 byte rollback region */
-LEAF(r4k_wait)
+LEAF(__r4k_wait)
        .set    push
        .set    noreorder
        /* start of rollback region */
@@ -146,14 +146,14 @@ LEAF(r4k_wait)
        jr      ra
        nop
        .set    pop
-       END(r4k_wait)
+       END(__r4k_wait)
 
        .macro  BUILD_ROLLBACK_PROLOGUE handler
        FEXPORT(rollback_\handler)
        .set    push
        .set    noat
        MFC0    k0, CP0_EPC
-       PTR_LA  k1, r4k_wait
+       PTR_LA  k1, __r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f
        bne     k0, k1, 9f
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    noreorder
        /* check if TLB contains an entry for EPC */
        MFC0    k1, CP0_ENTRYHI
-       andi    k1, 0xff        /* ASID_MASK patched at run-time!! */
+       andi    k1, 0xff        /* ASID_MASK */
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644 (file)
index 0000000..0c655de
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx  the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004  Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012         MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
+ * the implementation of the "wait" feature differs between CPU families. This
+ * points to the function that implements CPU specific wait.
+ * The wait instruction stops the pipeline and reduces the power consumption of
+ * the CPU very much.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void r3081_wait(void)
+{
+       unsigned long cfg = read_c0_conf();
+       write_c0_conf(cfg | R30XX_CONF_HALT);
+       local_irq_enable();
+}
+
+static void r39xx_wait(void)
+{
+       if (!need_resched())
+               write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+       local_irq_enable();
+}
+
+void r4k_wait(void)
+{
+       local_irq_enable();
+       __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically.  Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void r4k_wait_irqoff(void)
+{
+       if (!need_resched())
+               __asm__(
+               "       .set    push            \n"
+               "       .set    mips3           \n"
+               "       wait                    \n"
+               "       .set    pop             \n");
+       local_irq_enable();
+       __asm__(
+       "       .globl __pastwait       \n"
+       "__pastwait:                    \n");
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38.  The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+       if (!need_resched())
+               __asm__(
+               "       .set    push                                    \n"
+               "       .set    mips3                                   \n"
+               "       .set    noat                                    \n"
+               "       mfc0    $1, $12                                 \n"
+               "       sync                                            \n"
+               "       mtc0    $1, $12         # stalls until W stage  \n"
+               "       wait                                            \n"
+               "       mtc0    $1, $12         # stalls until W stage  \n"
+               "       .set    pop                                     \n");
+       local_irq_enable();
+}
+
+/*
+ * Au1 'wait' is only useful when the 32 kHz counter is used as the timer,
+ * since the core clock (and the cp0 counter) stops upon executing it. Only
+ * an interrupt can wake it, so interrupts must be enabled before idling.
+ */
+static void au1k_wait(void)
+{
+       unsigned long c0status = read_c0_status() | 1;  /* irqs on */
+
+       __asm__(
+       "       .set    mips3                   \n"
+       "       cache   0x14, 0(%0)             \n"
+       "       cache   0x14, 32(%0)            \n"
+       "       sync                            \n"
+       "       mtc0    %1, $12                 \n" /* wr c0status */
+       "       wait                            \n"
+       "       nop                             \n"
+       "       nop                             \n"
+       "       nop                             \n"
+       "       nop                             \n"
+       "       .set    mips0                   \n"
+       : : "r" (au1k_wait), "r" (c0status));
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+       nowait = 1;
+
+       return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+       struct cpuinfo_mips *c = &current_cpu_data;
+
+       if (nowait) {
+               printk("Wait instruction disabled.\n");
+               return;
+       }
+
+       switch (c->cputype) {
+       case CPU_R3081:
+       case CPU_R3081E:
+               cpu_wait = r3081_wait;
+               break;
+       case CPU_TX3927:
+               cpu_wait = r39xx_wait;
+               break;
+       case CPU_R4200:
+/*     case CPU_R4300: */
+       case CPU_R4600:
+       case CPU_R4640:
+       case CPU_R4650:
+       case CPU_R4700:
+       case CPU_R5000:
+       case CPU_R5500:
+       case CPU_NEVADA:
+       case CPU_4KC:
+       case CPU_4KEC:
+       case CPU_4KSC:
+       case CPU_5KC:
+       case CPU_25KF:
+       case CPU_PR4450:
+       case CPU_BMIPS3300:
+       case CPU_BMIPS4350:
+       case CPU_BMIPS4380:
+       case CPU_BMIPS5000:
+       case CPU_CAVIUM_OCTEON:
+       case CPU_CAVIUM_OCTEON_PLUS:
+       case CPU_CAVIUM_OCTEON2:
+       case CPU_JZRISC:
+       case CPU_LOONGSON1:
+       case CPU_XLR:
+       case CPU_XLP:
+               cpu_wait = r4k_wait;
+               break;
+
+       case CPU_RM7000:
+               cpu_wait = rm7k_wait_irqoff;
+               break;
+
+       case CPU_M14KC:
+       case CPU_M14KEC:
+       case CPU_24K:
+       case CPU_34K:
+       case CPU_1004K:
+               cpu_wait = r4k_wait;
+               if (read_c0_config7() & MIPS_CONF7_WII)
+                       cpu_wait = r4k_wait_irqoff;
+               break;
+
+       case CPU_74K:
+               cpu_wait = r4k_wait;
+               if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+                       cpu_wait = r4k_wait_irqoff;
+               break;
+
+       case CPU_TX49XX:
+               cpu_wait = r4k_wait_irqoff;
+               break;
+       case CPU_ALCHEMY:
+               cpu_wait = au1k_wait;
+               break;
+       case CPU_20KC:
+               /*
+                * WAIT on Rev1.0 has E1, E2, E3 and E16.
+                * WAIT on Rev2.0 and Rev3.0 has E16.
+                * Rev3.1 WAIT is nop, why bother
+                */
+               if ((c->processor_id & 0xff) <= 0x64)
+                       break;
+
+               /*
+                * Another rev is incrementing c0_count at a reduced clock
+                * rate while in WAIT mode.  So we basically have the choice
+                * between using the cp0 timer as clocksource or avoiding
+                * the WAIT instruction.  Until more details are known,
+                * disable the use of WAIT for 20Kc entirely.
+                  cpu_wait = r4k_wait;
+                */
+               break;
+       case CPU_RM9000:
+               if ((c->processor_id & 0x00ff) >= 0x40)
+                       cpu_wait = r4k_wait;
+               break;
+       default:
+               break;
+       }
+}
+
+static void smtc_idle_hook(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       void smtc_idle_loop_hook(void);
+
+       smtc_idle_loop_hook();
+#endif
+}
+
+void arch_cpu_idle(void)
+{
+       smtc_idle_hook();
+       if (cpu_wait)
+               cpu_wait();
+       else
+               local_irq_enable();
+}
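
The new idle.c keeps the existing scheme: check_wait() selects a CPU-specific routine into the
cpu_wait pointer and arch_cpu_idle() falls back to enabling interrupts when none is set. A
stand-alone sketch of that dispatch pattern, with invented names and not taken from the patch:

    #include <stdio.h>

    static void (*cpu_wait_hook)(void);       /* chosen once, as check_wait() does */

    static void r4k_style_wait(void) { puts("execute the wait instruction"); }

    static void idle_step(void)
    {
            if (cpu_wait_hook)
                    cpu_wait_hook();          /* CPU-specific low-power wait */
            else
                    puts("no wait available: just re-enable interrupts");
    }

    int main(void)
    {
            idle_step();                      /* fallback path */
            cpu_wait_hook = r4k_style_wait;   /* e.g. what CPU_24K would get */
            idle_step();                      /* hook path */
            return 0;
    }
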
index 12bc4ebdf55b6bf5e087532feafcaddca94b417e..1f8187ab0997be478f69b66eadb8ac4a6bf7e489 100644 (file)
@@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-       free_insn_slot(p->ainsn.insn, 0);
+       if (p->ainsn.insn) {
+               free_insn_slot(p->ainsn.insn, 0);
+               p->ainsn.insn = NULL;
+       }
 }
 
 static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
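
The kprobes hunk above makes arch_remove_kprobe() idempotent by checking and clearing ainsn.insn.
The same defensive pattern in isolation, with invented names, purely as a sketch:

    #include <stdlib.h>

    static void release_slot(void **slot)
    {
            if (*slot) {
                    free(*slot);
                    *slot = NULL;   /* a second release_slot() is now a no-op */
            }
    }

    int main(void)
    {
            void *p = malloc(16);
            release_slot(&p);
            release_slot(&p);       /* harmless thanks to the NULL check */
            return 0;
    }
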
index a3e461408b7e830758d3b3dfeed661573277131e..acb34373679e21f9940f62f251e64dc1e32e5f17 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
 #include <asm/prom.h>
index eb902c1f0cad4c50031836ad73225f620be8d7d5..c6a041d9d05d57fcd71f91a228c86524e00efd08 100644 (file)
@@ -51,19 +51,6 @@ void arch_cpu_idle_dead(void)
 }
 #endif
 
-void arch_cpu_idle(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       extern void smtc_idle_loop_hook(void);
-
-       smtc_idle_loop_hook();
-#endif
-       if (cpu_wait)
-               (*cpu_wait)();
-       else
-               local_irq_enable();
-}
-
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
@@ -224,6 +211,9 @@ struct mips_frame_info {
        int             pc_offset;
 };
 
+#define J_TARGET(pc,target)    \
+               (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
 #ifdef CONFIG_CPU_MICROMIPS
@@ -264,7 +254,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
 #endif
 }
 
-static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+static inline int is_jump_ins(union mips_instruction *ip)
 {
 #ifdef CONFIG_CPU_MICROMIPS
        /*
@@ -288,6 +278,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
                return 0;
        return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
 #else
+       if (ip->j_format.opcode == j_op)
+               return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
@@ -350,7 +342,7 @@ static int get_frame_info(struct mips_frame_info *info)
 
        for (i = 0; i < max_insns; i++, ip++) {
 
-               if (is_jal_jalr_jr_ins(ip))
+               if (is_jump_ins(ip))
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
@@ -393,15 +385,42 @@ err:
 
 static struct mips_frame_info schedule_mfi __read_mostly;
 
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+       return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+       union mips_instruction *ip = (void *)schedule;
+       int max_insns = 8;
+       int i;
+
+       for (i = 0; i < max_insns; i++, ip++) {
+               if (ip->j_format.opcode == j_op)
+                       return J_TARGET(ip, ip->j_format.target);
+       }
+       return 0;
+}
+#endif
+
 static int __init frame_info_init(void)
 {
        unsigned long size = 0;
 #ifdef CONFIG_KALLSYMS
        unsigned long ofs;
+#endif
+       unsigned long addr;
+
+       addr = get___schedule_addr();
+       if (!addr)
+               addr = (unsigned long)schedule;
 
-       kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
+#ifdef CONFIG_KALLSYMS
+       kallsyms_lookup_size_offset(addr, &size, &ofs);
 #endif
-       schedule_mfi.func = schedule;
+       schedule_mfi.func = (void *)addr;
        schedule_mfi.func_size = size;
 
        get_frame_info(&schedule_mfi);
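
J_TARGET(), added above for the !CONFIG_KALLSYMS fallback, decodes a MIPS 'j' instruction: the top
four bits of the PC are kept and the 26-bit target field supplies the rest, shifted left by two.
A worked example with made-up values:

    #include <stdio.h>

    #define J_TARGET(pc, target) \
            (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

    int main(void)
    {
            unsigned long pc = 0x80123450UL;   /* address of the j instruction */
            unsigned long field = 0x012345UL;  /* 26-bit target field from the opcode */

            printf("jump target = %#lx\n", J_TARGET(pc, field)); /* 0x80048d14 */
            return 0;
    }
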
index 93c070b41b0dae92307bc408284f057cb287b825..6fa198db89994cbe504dee1dadd51b25f9a38790 100644 (file)
@@ -40,6 +40,7 @@
 #include <asm/processor.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
+#include <asm/setup.h>
 
 static struct rtlx_info *rtlx;
 static int major;
index 36cfd4060e1f423eed05869a1adc9591f36ab662..97a5909a61cf0c623dfdf8284eaf11bf13e7adf7 100644 (file)
@@ -423,4 +423,5 @@ sys_call_table:
        PTR     sys_process_vm_writev           /* 5305 */
        PTR     sys_kcmp
        PTR     sys_finit_module
+       PTR     sys_getdents64
        .size   sys_call_table,.-sys_call_table
index c17619fe18e32a9f23a5df7e18b0b943b5d0cf2c..6e7862ab46cc4a6fef3c31e1ade85e04b357824f 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/idle.h>
 #include <asm/r4k-timer.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
index 31d22f3121c98bb8c0b57488c60c58d4c0ca5b4c..75a4fd709841a9df42d4eba3218e053f7bf8c351 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/hardirq.h>
 #include <asm/hazards.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/mmu_context.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
@@ -111,7 +112,7 @@ static int vpe0limit;
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned int smtc_asid_mask = 0xff;
+unsigned long smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
        unsigned long flags;
        int mtflags;
        unsigned long tcrestart;
-       extern void r4k_wait_irqoff(void), __pastwait(void);
        int set_resched_flag = (type == LINUX_SMP_IPI &&
                                action == SMP_RESCHEDULE_YOURSELF);
 
@@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
                         */
                        if (cpu_wait == r4k_wait_irqoff) {
                                tcrestart = read_tc_c0_tcrestart();
-                               if (tcrestart >= (unsigned long)r4k_wait_irqoff
-                                   && tcrestart < (unsigned long)__pastwait) {
+                               if (address_is_in_r4k_wait_irqoff(tcrestart)) {
                                        write_tc_c0_tcrestart(__pastwait);
                                        tcstatus &= ~TCSTATUS_IXMT;
                                        write_tc_c0_tcstatus(tcstatus);
@@ -1395,7 +1394,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
        asid = asid_cache(cpu);
 
        do {
-               if (!ASID_MASK(ASID_INC(asid))) {
+               if (!((asid += ASID_INC) & ASID_MASK) ) {
                        if (cpu_has_vtag_icache)
                                flush_icache_all();
                        /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1413,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                                mips_ihb();
                                        }
                                        tcstat = read_tc_c0_tcstatus();
-                                       smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
+                                       smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
                                        if (!prevhalt)
                                                write_tc_c0_tchalt(0);
                                }
@@ -1423,7 +1422,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                asid = ASID_FIRST_VERSION;
                        local_flush_tlb_all();  /* start new asid cycle */
                }
-       } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
+       } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
 
        /*
         * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1460,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
                tlb_read();
                ehb();
                ehi = read_c0_entryhi();
-               if (ASID_MASK(ehi) == asid) {
+               if ((ehi & ASID_MASK) == asid) {
                    /*
                     * Invalidate only entries with specified ASID,
                     * making sure all entries differ.
index 77cff1f6d050cb92e21475ae52a9ef2f037b5bc5..a75ae40184aa3a5d35e08668e71022230ee087bc 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -57,7 +58,6 @@
 #include <asm/uasm.h>
 
 extern void check_wait(void);
-extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
 extern u32 handle_tlbl[];
@@ -897,22 +897,24 @@ out_sigsegv:
 
 asmlinkage void do_tr(struct pt_regs *regs)
 {
-       unsigned int opcode, tcode = 0;
+       u32 opcode, tcode = 0;
        u16 instr[2];
-       unsigned long epc = exception_epc(regs);
+       unsigned long epc = msk_isa16_mode(exception_epc(regs));
 
-       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
-               (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+       if (get_isa16_mode(regs->cp0_epc)) {
+               if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+                   __get_user(instr[1], (u16 __user *)(epc + 2)))
                        goto out_sigsegv;
-       opcode = (instr[0] << 16) | instr[1];
-
-       /* Immediate versions don't provide a code.  */
-       if (!(opcode & OPCODE)) {
-               if (get_isa16_mode(regs->cp0_epc))
-                       /* microMIPS */
-                       tcode = (opcode >> 12) & 0x1f;
-               else
-                       tcode = ((opcode >> 6) & ((1 << 10) - 1));
+               opcode = (instr[0] << 16) | instr[1];
+               /* Immediate versions don't provide a code.  */
+               if (!(opcode & OPCODE))
+                       tcode = (opcode >> 12) & ((1 << 4) - 1);
+       } else {
+               if (__get_user(opcode, (u32 __user *)epc))
+                       goto out_sigsegv;
+               /* Immediate versions don't provide a code.  */
+               if (!(opcode & OPCODE))
+                       tcode = (opcode >> 6) & ((1 << 10) - 1);
        }
 
        do_trap_or_bp(regs, tcode, "Trap");
@@ -1542,7 +1544,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
-               char *vec_start = (cpu_wait == r4k_wait) ?
+               char *vec_start = using_rollback_handler() ?
                        &rollback_except_vec_vi : &except_vec_vi;
 #ifdef CONFIG_MIPS_MT_SMTC
                /*
@@ -1656,7 +1658,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
        unsigned int hwrena = cpu_hwrena_impl_bits;
-       unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);
@@ -1740,9 +1741,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        }
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       asid = ASID_FIRST_VERSION;
-       cpu_data[cpu].asid_cache = asid;
-       TLBMISS_HANDLER_SETUP();
+       if (!cpu_data[cpu].asid_cache)
+               cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
@@ -1814,10 +1814,8 @@ void __init trap_init(void)
        extern char except_vec4;
        extern char except_vec3_r4000;
        unsigned long i;
-       int rollback;
 
        check_wait();
-       rollback = (cpu_wait == r4k_wait);
 
 #if defined(CONFIG_KGDB)
        if (kgdb_early_setup)
@@ -1894,7 +1892,8 @@ void __init trap_init(void)
        if (board_be_init)
                board_be_init();
 
-       set_except_vector(0, rollback ? rollback_handle_int : handle_int);
+       set_except_vector(0, using_rollback_handler() ? rollback_handle_int
+                                                     : handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);
index e0dad0289797b292f9b436de9afa492eca0112de..dd203e59e6fd650767a3ae5286e0599f4dbc15b7 100644 (file)
@@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 long
 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
-       return -EINVAL;
+       return -ENOIOCTLCMD;
 }
 
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
@@ -401,7 +401,7 @@ int
 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                    struct kvm_guest_debug *dbg)
 {
-       return -EINVAL;
+       return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -475,14 +475,248 @@ int
 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                struct kvm_mp_state *mp_state)
 {
-       return -EINVAL;
+       return -ENOIOCTLCMD;
 }
 
 int
 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                struct kvm_mp_state *mp_state)
 {
-       return -EINVAL;
+       return -ENOIOCTLCMD;
+}
+
+#define MIPS_CP0_32(_R, _S)                                    \
+       (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S)                                    \
+       (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX         MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0      MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1      MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT       MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL     MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK      MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN     MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_WIRED         MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA                MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR      MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT         MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI       MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE       MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS                MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE         MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EBASE         MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG                MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1       MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2       MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3       MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG7       MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT      MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC      MIPS_CP0_64(30, 0)
+
+static u64 kvm_mips_get_one_regs[] = {
+       KVM_REG_MIPS_R0,
+       KVM_REG_MIPS_R1,
+       KVM_REG_MIPS_R2,
+       KVM_REG_MIPS_R3,
+       KVM_REG_MIPS_R4,
+       KVM_REG_MIPS_R5,
+       KVM_REG_MIPS_R6,
+       KVM_REG_MIPS_R7,
+       KVM_REG_MIPS_R8,
+       KVM_REG_MIPS_R9,
+       KVM_REG_MIPS_R10,
+       KVM_REG_MIPS_R11,
+       KVM_REG_MIPS_R12,
+       KVM_REG_MIPS_R13,
+       KVM_REG_MIPS_R14,
+       KVM_REG_MIPS_R15,
+       KVM_REG_MIPS_R16,
+       KVM_REG_MIPS_R17,
+       KVM_REG_MIPS_R18,
+       KVM_REG_MIPS_R19,
+       KVM_REG_MIPS_R20,
+       KVM_REG_MIPS_R21,
+       KVM_REG_MIPS_R22,
+       KVM_REG_MIPS_R23,
+       KVM_REG_MIPS_R24,
+       KVM_REG_MIPS_R25,
+       KVM_REG_MIPS_R26,
+       KVM_REG_MIPS_R27,
+       KVM_REG_MIPS_R28,
+       KVM_REG_MIPS_R29,
+       KVM_REG_MIPS_R30,
+       KVM_REG_MIPS_R31,
+
+       KVM_REG_MIPS_HI,
+       KVM_REG_MIPS_LO,
+       KVM_REG_MIPS_PC,
+
+       KVM_REG_MIPS_CP0_INDEX,
+       KVM_REG_MIPS_CP0_CONTEXT,
+       KVM_REG_MIPS_CP0_PAGEMASK,
+       KVM_REG_MIPS_CP0_WIRED,
+       KVM_REG_MIPS_CP0_BADVADDR,
+       KVM_REG_MIPS_CP0_ENTRYHI,
+       KVM_REG_MIPS_CP0_STATUS,
+       KVM_REG_MIPS_CP0_CAUSE,
+       /* EPC set via kvm_regs, et al. */
+       KVM_REG_MIPS_CP0_CONFIG,
+       KVM_REG_MIPS_CP0_CONFIG1,
+       KVM_REG_MIPS_CP0_CONFIG2,
+       KVM_REG_MIPS_CP0_CONFIG3,
+       KVM_REG_MIPS_CP0_CONFIG7,
+       KVM_REG_MIPS_CP0_ERROREPC
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       s64 v;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+               v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+               break;
+       case KVM_REG_MIPS_HI:
+               v = (long)vcpu->arch.hi;
+               break;
+       case KVM_REG_MIPS_LO:
+               v = (long)vcpu->arch.lo;
+               break;
+       case KVM_REG_MIPS_PC:
+               v = (long)vcpu->arch.pc;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               v = (long)kvm_read_c0_guest_index(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               v = (long)kvm_read_c0_guest_context(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               v = (long)kvm_read_c0_guest_pagemask(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               v = (long)kvm_read_c0_guest_wired(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               v = (long)kvm_read_c0_guest_badvaddr(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               v = (long)kvm_read_c0_guest_entryhi(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               v = (long)kvm_read_c0_guest_status(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               v = (long)kvm_read_c0_guest_cause(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               v = (long)kvm_read_c0_guest_errorepc(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG:
+               v = (long)kvm_read_c0_guest_config(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG1:
+               v = (long)kvm_read_c0_guest_config1(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG2:
+               v = (long)kvm_read_c0_guest_config2(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG3:
+               v = (long)kvm_read_c0_guest_config3(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG7:
+               v = (long)kvm_read_c0_guest_config7(cop0);
+               break;
+       default:
+               return -EINVAL;
+       }
+       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+               return put_user(v, uaddr64);
+       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+               u32 v32 = (u32)v;
+               return put_user(v32, uaddr32);
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       u64 v;
+
+       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+               if (get_user(v, uaddr64) != 0)
+                       return -EFAULT;
+       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+               s32 v32;
+
+               if (get_user(v32, uaddr32) != 0)
+                       return -EFAULT;
+               v = (s64)v32;
+       } else {
+               return -EINVAL;
+       }
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0:
+               /* Silently ignore requests to set $0 */
+               break;
+       case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+               vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+               break;
+       case KVM_REG_MIPS_HI:
+               vcpu->arch.hi = v;
+               break;
+       case KVM_REG_MIPS_LO:
+               vcpu->arch.lo = v;
+               break;
+       case KVM_REG_MIPS_PC:
+               vcpu->arch.pc = v;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               kvm_write_c0_guest_index(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               kvm_write_c0_guest_context(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               kvm_write_c0_guest_pagemask(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               kvm_write_c0_guest_wired(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               kvm_write_c0_guest_badvaddr(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               kvm_write_c0_guest_entryhi(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               kvm_write_c0_guest_status(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               kvm_write_c0_guest_cause(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               kvm_write_c0_guest_errorepc(cop0, v);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
 }
 
 long
@@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;
-       int intr;
 
        switch (ioctl) {
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       return -EFAULT;
+               if (ioctl == KVM_SET_ONE_REG)
+                       return kvm_mips_set_reg(vcpu, &reg);
+               else
+                       return kvm_mips_get_reg(vcpu, &reg);
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               u64 __user *reg_dest;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       return -EFAULT;
+               n = reg_list.n;
+               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       return -EFAULT;
+               if (n < reg_list.n)
+                       return -E2BIG;
+               reg_dest = user_list->reg;
+               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+                                sizeof(kvm_mips_get_one_regs)))
+                       return -EFAULT;
+               return 0;
+       }
        case KVM_NMI:
                /* Treat the NMI as a CPU reset */
                r = kvm_mips_reset_vcpu(vcpu);
@@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
                        if (copy_from_user(&irq, argp, sizeof(irq)))
                                goto out;
 
-                       intr = (int)irq.irq;
-
                        kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
                                  irq.irq);
 
@@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
                        break;
                }
        default:
-               r = -EINVAL;
+               r = -ENOIOCTLCMD;
        }
 
 out:
@@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 
        switch (ioctl) {
        default:
-               r = -EINVAL;
+               r = -ENOIOCTLCMD;
        }
 
        return r;
@@ -593,13 +854,13 @@ void kvm_arch_exit(void)
 int
 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       return -ENOTSUPP;
+       return -ENOIOCTLCMD;
 }
 
 int
 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       return -ENOTSUPP;
+       return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       return -ENOTSUPP;
+       return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       return -ENOTSUPP;
+       return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
@@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        int r;
 
        switch (ext) {
+       case KVM_CAP_ONE_REG:
+               r = 1;
+               break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
@@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext)
                break;
        }
        return r;
-
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
        int i;
 
-       for (i = 0; i < 32; i++)
-               vcpu->arch.gprs[i] = regs->gprs[i];
-
+       for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               vcpu->arch.gprs[i] = regs->gpr[i];
+       vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
        vcpu->arch.hi = regs->hi;
        vcpu->arch.lo = regs->lo;
        vcpu->arch.pc = regs->pc;
 
-       return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+       return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
        int i;
 
-       for (i = 0; i < 32; i++)
-               regs->gprs[i] = vcpu->arch.gprs[i];
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               regs->gpr[i] = vcpu->arch.gprs[i];
 
        regs->hi = vcpu->arch.hi;
        regs->lo = vcpu->arch.lo;
        regs->pc = vcpu->arch.pc;
 
-       return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+       return 0;
 }
 
 void kvm_mips_comparecount_func(unsigned long data)
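
The three new ioctl cases above wire MIPS KVM into the generic one-reg interface: KVM_GET_REG_LIST reports how many register ids exist (returning -E2BIG when the caller's buffer is too small) and then fills them in, while KVM_GET_ONE_REG / KVM_SET_ONE_REG move a single register through a struct kvm_one_reg. A rough userspace sketch of that handshake follows (hypothetical vcpu_fd, most error handling trimmed; this is an illustration, not part of the patch):

    /*
     * Sketch only: enumerate the vcpu's registers and read each one via
     * the one-reg API.  Assumes an already-created KVM vcpu fd.
     */
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int dump_vcpu_regs(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 }, *list;
            __u64 i;

            /* First call fails with E2BIG but fills in the register count. */
            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            if (!list)
                    return -1;
            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0)
                    return -1;

            for (i = 0; i < list->n; i++) {
                    __u64 val;
                    struct kvm_one_reg reg = {
                            .id   = list->reg[i],
                            .addr = (__u64)(unsigned long)&val,
                    };

                    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                            return -1;
                    /* val now holds the guest register identified by reg.id */
            }
            free(list);
            return 0;
    }

KVM_SET_ONE_REG is symmetric: point reg.addr at a value in userspace memory and the kernel reads it back into the guest register.
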
index 2b2bac9a40aa00a762a0efae5310fddd0521fb5c..4b6274b47f3368b289b378703e8e9a17de5f9275 100644 (file)
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
                                printk("MTCz, cop0->reg[EBASE]: %#lx\n",
                                       kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                               uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+                               uint32_t nasid =
+                                   vcpu->arch.gprs[rt] & ASID_MASK;
                                if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
                                    &&
-                                   (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
-                                     != nasid)) {
+                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                                     ASID_MASK) != nasid)) {
 
                                        kvm_debug
                                            ("MTCz, change ASID from %#lx to %#lx\n",
-                                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
-                                            ASID_MASK(vcpu->arch.gprs[rt]));
+                                            kvm_read_c0_guest_entryhi(cop0) &
+                                            ASID_MASK,
+                                            vcpu->arch.gprs[rt] & ASID_MASK);
 
                                        /* Blow away the shadow host TLBs */
                                        kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
                 * resulting handler will do the right thing
                 */
                index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                                                 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                                                 (kvm_read_c0_guest_entryhi
+                                                  (cop0) & ASID_MASK));
 
                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.  host_cp0_badvaddr & VPN2_MASK) |
-                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi =
                (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
 
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
         */
        index = kvm_mips_guest_tlb_lookup(vcpu,
                                          (va & VPN2_MASK) |
-                                         ASID_MASK(kvm_read_c0_guest_entryhi
-                                          (vcpu->arch.cop0)));
+                                         (kvm_read_c0_guest_entryhi
+                                          (vcpu->arch.cop0) & ASID_MASK));
        if (index < 0) {
                if (exccode == T_TLB_LD_MISS) {
                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
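
Throughout the emulation code above, the KVM-private function-style macro ASID_MASK(x) is dropped in favour of the plain bitmask constant ASID_MASK from <asm/mmu_context.h>, so ASID extraction is written the same way as in the rest of arch/mips. In rough terms (the 8-bit ASID layout is shown purely as an illustration):

    /* Illustration only: assumes the common 8-bit ASID layout. */
    #define ASID_MASK 0xff                    /* plain constant, not a macro call */

    static inline unsigned long asid_of(unsigned long entryhi)
    {
            return entryhi & ASID_MASK;       /* was: ASID_MASK(entryhi) */
    }
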
index 89511a9258d394f2d540e722fdaaa8f5aa6a82f1..c777dd36d4a8bf88be1000dc993bc2d8774b6a33 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-       return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-       return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void)
        old_pagemask = read_c0_pagemask();
 
        printk("HOST TLBs:\n");
-       printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+       printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
+       int srcu_idx, err = 0;
        pfn_t pfn;
 
        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-               return;
+               return 0;
 
+       srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 
        if (kvm_mips_is_error_pfn(pfn)) {
-               panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+               kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+               err = -EFAULT;
+               goto out;
        }
 
        kvm->arch.guest_pmap[gfn] = pfn;
-       return;
+out:
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       return err;
 }
 
 /* Translate guest KSEG0 addresses to Host PA */
@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                        gva);
                return KVM_INVALID_PAGE;
        }
-       kvm_mips_map_page(vcpu->kvm, gfn);
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return KVM_INVALID_ADDR;
+
        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);
 
-       kvm_mips_map_page(vcpu->kvm, gfn);
-       kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+               return -1;
 
        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                pfn0 = 0;
                pfn1 = 0;
        } else {
-               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
-               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+                       return -1;
 
                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
@@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
@@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
        unsigned long asid = asid_cache(cpu);
 
-       if (!(ASID_MASK(ASID_INC(asid)))) {
+       if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache) {
                        flush_icache_all();
                }
@@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (!newasid) {
                /* If we preempted while the guest was executing, then reload the pre-empted ASID */
                if (current->flags & PF_VCPU) {
-                       write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+                       write_c0_entryhi(vcpu->arch.
+                                        preempt_entryhi & ASID_MASK);
                        ehb();
                }
        } else {
@@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
-                               write_c0_entryhi(ASID_MASK(vcpu->arch.
-                                                guest_kernel_asid[cpu]));
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_kernel_asid[cpu] &
+                                                ASID_MASK);
                        else
-                               write_c0_entryhi(ASID_MASK(vcpu->arch.
-                                                guest_user_asid[cpu]));
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_user_asid[cpu] &
+                                                ASID_MASK);
                        ehb();
                }
        }
@@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
                            kvm_mips_guest_tlb_lookup(vcpu,
                                                      ((unsigned long) opc & VPN2_MASK)
                                                      |
-                                                     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                                                     (kvm_read_c0_guest_entryhi
+                                                      (cop0) & ASID_MASK));
                        if (index < 0) {
                                kvm_err
                                    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
index 466aeef044bd18c777ba6adff2dde1f4366b3fb3..30d725321db1e23dcaddd642b42edf97739448e6 100644 (file)
@@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-static int
-kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
-       kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
-       kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
-       kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
-       kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
-
-       kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
-       kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
-       kvm_write_c0_guest_pagemask(cop0,
-                                   regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
-       kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
-       kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
-
-       return 0;
-}
-
-static int
-kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
-       regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
-       regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
-       regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
-       regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
-
-       regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
-       regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
-       regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
-           kvm_read_c0_guest_pagemask(cop0);
-       regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
-       regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
-
-       regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
-       regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
-       regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
-       regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
-       regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
-
-       return 0;
-}
-
 static int kvm_trap_emul_vm_init(struct kvm *kvm)
 {
        return 0;
@@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
-       .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
-       .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
 };
 
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
index 9861c8669fab4047da01ad6af587ef948817d169..850821df924c32d069acbce03607b832c3a0c48b 100644 (file)
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get resource\n");
-               return -ENOMEM;
-       }
 
        /* remap gptu register range */
        gptu_membase = devm_ioremap_resource(&pdev->dev, res);
@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev)
        if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
                dev_err(&pdev->dev, "Failed to find magic\n");
                gptu_hwexit();
+               clk_disable(clk);
+               clk_put(clk);
                return -ENAVAIL;
        }
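
The !res check removed in the first hunk is redundant: devm_ioremap_resource() validates the resource itself and returns an ERR_PTR on failure, so a probe routine only needs the IS_ERR() test. The typical shape, sketched with a hypothetical driver name:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    /* Hypothetical probe illustrating the devm_ioremap_resource() idiom. */
    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);  /* copes with res == NULL */
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ... use base ... */
            return 0;
    }
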
 
index 8a12d00908e024ab3559681955182f7f216c0155..32b9f21bfd8562f37d8e51e1ad23908c320ad3e8 100644 (file)
@@ -11,7 +11,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
-#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last)
        s_pagemask = read_c0_pagemask();
        s_entryhi = read_c0_entryhi();
        s_index = read_c0_index();
-       asid = ASID_MASK(s_entryhi);
+       asid = s_entryhi & 0xff;
 
        for (i = first; i <= last; i++) {
                write_c0_index(i);
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last)
 
                        printk("va=%0*lx asid=%02lx\n",
                               width, (entryhi & ~0x1fffUL),
-                              ASID_MASK(entryhi));
+                              entryhi & 0xff);
                        printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
                               width,
                               (entrylo0 << 6) & PAGE_MASK, c0,
index 8327698b99377e0e78c74a7bf0bef19e657fbd53..91615c2ef0cf969baeff215ca3d8a627e3851d2f 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last)
        unsigned int asid;
        unsigned long entryhi, entrylo0;
 
-       asid = ASID_MASK(read_c0_entryhi());
+       asid = read_c0_entryhi() & 0xfc0;
 
        for (i = first; i <= last; i++) {
                write_c0_index(i<<8);
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last)
 
                /* Unused entries have a virtual address of KSEG0.  */
                if ((entryhi & 0xffffe000) != 0x80000000
-                   && (ASID_MASK(entryhi) == asid)) {
+                   && (entryhi & 0xfc0) == asid) {
                        /*
                         * Only print entries in use
                         */
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last)
                        printk("va=%08lx asid=%08lx"
                               "  [pa=%06lx n=%d d=%d v=%d g=%d]",
                               (entryhi & 0xffffe000),
-                              ASID_MASK(entryhi),
+                              entryhi & 0xfc0,
                               entrylo0 & PAGE_MASK,
                               (entrylo0 & (1 << 11)) ? 1 : 0,
                               (entrylo0 & (1 << 10)) ? 1 : 0,
index 35c8c64684941603f29c072478fd42b7401a7be8..65bfbb5d06f442efbd6601600097f94124cc171c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
+#include <asm/idle.h>
 #include <asm/reboot.h>
 
 #include <loongson.h>
index d4f610f9604a27296a685ea1455bbd18717b48be..547f34b69e4c06a471032ab588b8391e62f0cc39 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/io.h>
 #include <linux/pm.h>
+#include <asm/idle.h>
 #include <asm/reboot.h>
 
 #include <loongson1.h>
index 4a13c150f31b18d3317c9e0e7e12ffa39bb561e4..a63d1ed0827fefe36520b2d21877b5bd6a6767f4 100644 (file)
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
 #endif
 
        local_irq_save(flags);
-       old_ctx = ASID_MASK(read_c0_entryhi());
+       old_ctx = read_c0_entryhi() & ASID_MASK;
        write_c0_entrylo0(0);
        entry = r3k_have_wired_reg ? read_c0_wired() : 8;
        for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef DEBUG_TLB
                printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-                       ASID_MASK(cpu_context(cpu, mm)), start, end);
+                       cpu_context(cpu, mm) & ASID_MASK, start, end);
 #endif
                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size <= current_cpu_data.tlbsize) {
-                       int oldpid = ASID_MASK(read_c0_entryhi());
-                       int newpid = ASID_MASK(cpu_context(cpu, mm));
+                       int oldpid = read_c0_entryhi() & ASID_MASK;
+                       int newpid = cpu_context(cpu, mm) & ASID_MASK;
 
                        start &= PAGE_MASK;
                        end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
                printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-               newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
+               newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
                page &= PAGE_MASK;
                local_irq_save(flags);
-               oldpid = ASID_MASK(read_c0_entryhi());
+               oldpid = read_c0_entryhi() & ASID_MASK;
                write_c0_entryhi(page | newpid);
                BARRIER;
                tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = ASID_MASK(read_c0_entryhi());
+       pid = read_c0_entryhi() & ASID_MASK;
 
 #ifdef DEBUG_TLB
-       if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+       if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
                printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
                       (cpu_context(cpu, vma->vm_mm)), pid);
        }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
                local_irq_save(flags);
                /* Save old context and create impossible VPN2 value */
-               old_ctx = ASID_MASK(read_c0_entryhi());
+               old_ctx = read_c0_entryhi() & ASID_MASK;
                old_pagemask = read_c0_pagemask();
                w = read_c0_wired();
                write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
                local_irq_save(flags);
-               old_ctx = ASID_MASK(read_c0_entryhi());
+               old_ctx = read_c0_entryhi() & ASID_MASK;
                write_c0_entrylo0(entrylo0);
                write_c0_entryhi(entryhi);
                write_c0_index(wired);
index 09653b290d53356517607ac51388a09412e3d033..c643de4c473a8d67115c7f0d304ebe1dc1e8c4ce 100644 (file)
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
        ENTER_CRITICAL(flags);
 
-       pid = ASID_MASK(read_c0_entryhi());
+       pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
index 122f9207f49e7f58871cda1bb681f73370fd1286..91c2499f806a25809259a0b9682667ce2d7f31d5 100644 (file)
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = ASID_MASK(read_c0_entryhi());
+       pid = read_c0_entryhi() & ASID_MASK;
 
        local_irq_save(flags);
        address &= PAGE_MASK;
index 4d46d37875765a3d3024bdee0d043fae9b7a23f7..afeef93f81a79829ec564eaa8ddafebd4ed7e377 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
-#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -302,82 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
-#ifdef CONFIG_64BIT
-static int check_for_high_segbits __cpuinitdata;
-#endif
-
-static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
-                                       unsigned int i_const)
-{
-       unsigned int **p;
-
-       for (p = start; p < stop; p++) {
-#ifndef CONFIG_CPU_MICROMIPS
-               unsigned int *ip;
-
-               ip = *p;
-               *ip = (*ip & 0xffff0000) | i_const;
-#else
-               unsigned short *ip;
-
-               ip = ((unsigned short *)((unsigned int)*p - 1));
-               if ((*ip & 0xf000) == 0x4000) {
-                       *ip &= 0xfff1;
-                       *ip |= (i_const << 1);
-               } else if ((*ip & 0xf000) == 0x6000) {
-                       *ip &= 0xfff1;
-                       *ip |= ((i_const >> 2) << 1);
-               } else {
-                       ip++;
-                       *ip = i_const;
-               }
-#endif
-               local_flush_icache_range((unsigned long)ip,
-                                        (unsigned long)ip + sizeof(*ip));
-       }
-}
-
-#define asid_insn_fixup(section, const)                                        \
-do {                                                                   \
-       extern unsigned int *__start_ ## section;                       \
-       extern unsigned int *__stop_ ## section;                        \
-       insn_fixup(&__start_ ## section, &__stop_ ## section, const);   \
-} while(0)
-
-/*
- * Caller is assumed to flush the caches before the first context switch.
- */
-static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
-                                unsigned int version_mask,
-                                unsigned int first_version)
-{
-       extern asmlinkage void handle_ri_rdhwr_vivt(void);
-       unsigned long *vivt_exc;
-
-#ifdef CONFIG_CPU_MICROMIPS
-       /*
-        * Worst case optimised microMIPS addiu instructions support
-        * only a 3-bit immediate value.
-        */
-       if(inc > 7)
-               panic("Invalid ASID increment value!");
-#endif
-       asid_insn_fixup(__asid_inc, inc);
-       asid_insn_fixup(__asid_mask, mask);
-       asid_insn_fixup(__asid_version_mask, version_mask);
-       asid_insn_fixup(__asid_first_version, first_version);
-
-       /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
-       vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
-#ifdef CONFIG_CPU_MICROMIPS
-       vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
-#endif
-       vivt_exc++;
-       *vivt_exc = (*vivt_exc & ~mask) | mask;
-
-       current_cpu_data.asid_cache = first_version;
-}
-
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2256,7 +2179,6 @@ void __cpuinit build_tlb_refill_handler(void)
        case CPU_TX3922:
        case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-               setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
                if (cpu_has_local_ebase)
                        build_r3000_tlb_refill_handler();
                if (!run_once) {
@@ -2282,11 +2204,6 @@ void __cpuinit build_tlb_refill_handler(void)
                break;
 
        default:
-#ifndef CONFIG_MIPS_MT_SMTC
-               setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
-#else
-               setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
-#endif
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
index af319143b59187a4918805f737e2dcdb480845e7..eaa99d28cb8eddbf1fe0311740f22054bb4e15ad 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/pm.h>
 #include <linux/bootmem.h>
 
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <asm/bootinfo.h>
index e3e094100e3e51f2ea879431f85a9512d84e22a4..89c8c1066632b5f2800c1a4f2ecaef212ac890cf 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/serial_8250.h>
 #include <linux/pm.h>
 
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <asm/bootinfo.h>
index 0edb89a6351610480833ee81d5e996b45ee3ea45..1c98975316604aa27517afec87cb450bf459d6bd 100644 (file)
@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c)
        return 0; /* foo */
 }
 
-static inline int str2eaddr(unsigned char *ea, unsigned char *str)
+int str2eaddr(unsigned char *ea, unsigned char *str)
 {
        int index = 0;
        unsigned char num = 0;
index 1651cfdbfe7b1303c03b629171190e8df013ad16..396b2967ad856bb974da6ff8055d27037cd9b654 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <asm/bootinfo.h>
 #include <asm/cacheflush.h>
+#include <asm/idle.h>
 #include <asm/r4kcache.h>
 #include <asm/reboot.h>
 #include <asm/smp-ops.h>
index ef7da1e227e61309c990548fa6ed93b259fe0040..e3203d414fee331eecce025924b0a8e7c4cee362 100644 (file)
                        reg-shift = <2>;
                };
        };
+
+       usb@101c0000 {
+               compatible = "ralink,rt3050-usb", "snps,dwc2";
+               reg = <0x101c0000 40000>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <18>;
+
+               status = "disabled";
+       };
 };
index c18c9a84f4c4ee5794088f133332d3a391b834b7..0ac73ea281984909df89403d17fb82fd4e9f3ae4 100644 (file)
@@ -43,4 +43,8 @@
                        reg = <0x50000 0x7b0000>;
                };
        };
+
+       usb@101c0000 {
+               status = "ok";
+       };
 };
index fb1569580def7c25a03a7f1348315c4e93e92e3c..6b5f3406f414cd85de2230398a17ed9f7e047b3c 100644 (file)
@@ -88,7 +88,7 @@ void __init plat_mem_setup(void)
        __dt_setup_arch(&__dtb_start);
 
        if (soc_info.mem_size)
-               add_memory_region(soc_info.mem_base, soc_info.mem_size,
+               add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
                                  BOOT_MEM_RAM);
        else
                detect_memory_region(soc_info.mem_base,
index 5364aabc21027951532bc5d0e7d97898344f701a..681e7f86c08000f29884ce369efafab86afe1b29 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/time.h>
 #include <asm/reboot.h>
 #include <asm/r4kcache.h>
index 729a50991780f7e6aceb022012e4278993c5a6a7..b7eccbd17bf7e72d3ec78250ecc0bc80f606b504 100644 (file)
@@ -331,7 +331,8 @@ static int tx4939_netdev_event(struct notifier_block *this,
                               unsigned long event,
                               void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
        if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) {
                __u64 bit = 0;
                if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0))
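
The tx4939 notifier above is converted to the new netdevice-notifier calling convention: the ptr argument now carries a struct netdev_notifier_info rather than the net_device itself, so handlers recover the device with netdev_notifier_info_to_dev(). A minimal sketch of a handler written against that convention (hypothetical example, not from this patch):

    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    /* Minimal sketch of a netdevice notifier using the new convention. */
    static int example_netdev_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            if (event == NETDEV_CHANGE && netif_carrier_ok(dev))
                    pr_info("%s: carrier is up\n", dev->name);

            return NOTIFY_DONE;
    }
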
index 70a3f90131d82af9d67bac9ff2d8d206812dde6b..d7f755833c3f43431b48f38b296da4df34d993e6 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/reboot.h>
index cc5474b24f0657f6b51091d6f2779427a03d3e78..80beb188ed476ebb2775db8a89673bd1de95f631 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 
 #include <asm/cacheflush.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
 
index 8137c25c4e15912841f702a655b05677d0c90a04..6f31cc0f1a878139dc4ac233d89f8f1d22f93379 100644 (file)
@@ -103,4 +103,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
        return channel ? 15 : 14;
 }
 
+#include <asm-generic/pci_iomap.h>
+
 #endif /* _ASM_PCI_H */
index b4ce844c9391c82dd36136c86a17141e6b5a93c0..db80fd3e398b8abf0321db0d08f9d1ec248cb001 100644 (file)
@@ -74,4 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_SOCKET_H */
index 68fcab8f8f6f5eadebd5a18715034d6b33c8d781..222152a3f75195b429a6794e036c279cdb2e6160 100644 (file)
@@ -60,6 +60,7 @@ ENTRY(ret_from_kernel_thread)
        mov     (REG_D0,fp),d0
        mov     (REG_A0,fp),a0
        calls   (a0)
+       GET_THREAD_INFO a2              # A2 must be set on return from sys_exit()
        clr     d0
        mov     d0,(REG_D0,fp)
        jmp     syscall_exit
@@ -107,10 +108,10 @@ syscall_exit_work:
        and     EPSW_nSL,d0
        beq     resume_kernel           # returning to supervisor mode
 
-       btst    _TIF_SYSCALL_TRACE,d2
-       beq     work_pending
        LOCAL_IRQ_ENABLE                # could let syscall_trace_exit() call
                                        # schedule() instead
+       btst    _TIF_SYSCALL_TRACE,d2
+       beq     work_pending
        mov     fp,d0
        call    syscall_trace_exit[],0  # do_syscall_trace(regs)
        jmp     resume_userspace
@@ -123,6 +124,7 @@ work_pending:
 work_resched:
        call    schedule[],0
 
+resume_userspace:
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
        LOCAL_IRQ_DISABLE
@@ -131,6 +133,8 @@ work_resched:
        mov     (TI_flags,a2),d2
        btst    _TIF_WORK_MASK,d2
        beq     restore_all
+
+       LOCAL_IRQ_ENABLE
        btst    _TIF_NEED_RESCHED,d2
        bne     work_resched
 
@@ -169,17 +173,6 @@ ret_from_intr:
        and     EPSW_nSL,d0
        beq     resume_kernel           # returning to supervisor mode
 
-ENTRY(resume_userspace)
-       # make sure we don't miss an interrupt setting need_resched or
-       # sigpending between sampling and the rti
-       LOCAL_IRQ_DISABLE
-
-       # is there any work to be done on int/exception return?
-       mov     (TI_flags,a2),d2
-       btst    _TIF_WORK_MASK,d2
-       bne     work_pending
-       jmp     restore_all
-
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
        LOCAL_IRQ_DISABLE
index 1adcf024bb9a4c2221ae93bce8558d85c0b64abb..e37fac0461f3351ae6284ca011f3fcb0d80cc906 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include "pci-asb2305.h"
 
 unsigned int pci_probe = 1;
index cad060f288cf51e089bfb9cf7b6e879064bc0989..6507dabdd5ddc1a758e68f7476eb197fac9ec7b3 100644 (file)
@@ -245,7 +245,7 @@ config SMP
 
 config IRQSTACKS
        bool "Use separate kernel stacks when processing interrupts"
-       default n
+       default y
        help
          If you say Y here the kernel will use separate kernel stacks
          for handling hard and soft interrupts.  This can help avoid
index 2f967cc6649e0cab325136624a614af4f0784281..96ec3982be8d37271b6e3236d00d3a0f3f5ce4d1 100644 (file)
@@ -23,24 +23,21 @@ NM          = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
 LIBGCC         = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
-MACHINE                := $(shell uname -m)
-NATIVE         := $(if $(filter parisc%,$(MACHINE)),1,0)
-
 ifdef CONFIG_64BIT
 UTS_MACHINE    := parisc64
 CHECKFLAGS     += -D__LP64__=1 -m64
-WIDTH          := 64
+CC_ARCHES      = hppa64
 else # 32-bit
-WIDTH          :=
+CC_ARCHES      = hppa hppa2.0 hppa1.1
 endif
 
-# attempt to help out folks who are cross-compiling
-ifeq ($(NATIVE),1)
-CROSS_COMPILE  := hppa$(WIDTH)-linux-
-else
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := hppa$(WIDTH)-linux-gnu-
- endif
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+       ifeq ($(CROSS_COMPILE),)
+               CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+               CROSS_COMPILE := $(call cc-cross-prefix, \
+                       $(foreach a,$(CC_ARCHES), \
+                       $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+       endif
 endif
 
 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
@@ -69,7 +66,7 @@ KBUILD_CFLAGS_KERNEL += -mlong-calls
 endif
 
 # select which processor to optimise for
-cflags-$(CONFIG_PA7100)                += -march=1.1 -mschedule=7100
+cflags-$(CONFIG_PA7000)                += -march=1.1 -mschedule=7100
 cflags-$(CONFIG_PA7200)                += -march=1.1 -mschedule=7200
 cflags-$(CONFIG_PA7100LC)      += -march=1.1 -mschedule=7100LC
 cflags-$(CONFIG_PA7300LC)      += -march=1.1 -mschedule=7300
index 89fb40005e3f81e875ed0111952e7941282832cf..0da848232344fc41d9583cc10f8510a54a7d6a16 100644 (file)
        SAVE_SP  (%sr4, PT_SR4 (\regs))
        SAVE_SP  (%sr5, PT_SR5 (\regs))
        SAVE_SP  (%sr6, PT_SR6 (\regs))
-       SAVE_SP  (%sr7, PT_SR7 (\regs))
 
        SAVE_CR  (%cr17, PT_IASQ0(\regs))
        mtctl    %r0,   %cr17
index 12373c4dababec920c0ec42f120064011567d4b6..241c34518465d9404f56b786a86554a1e89824a3 100644 (file)
 #include <linux/threads.h>
 #include <linux/irq.h>
 
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
 typedef struct {
        unsigned int __softirq_pending;
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
        unsigned int kernel_stack_usage;
-#endif
+       unsigned int irq_stack_usage;
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
 #endif
+       unsigned int irq_unaligned_count;
+       unsigned int irq_fpassist_count;
        unsigned int irq_tlb_count;
 } ____cacheline_aligned irq_cpustat_t;
 
@@ -28,6 +33,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
 #define inc_irq_stat(member)   this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
 #define local_softirq_pending()        this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
index 0e625ab9aaec90df20e3a5059ad792cd8f293a10..cc50d33b7b881b7c57f7a46c125a56860840e514 100644 (file)
@@ -39,17 +39,14 @@ extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
 static inline int pfn_to_nid(unsigned long pfn)
 {
        unsigned int i;
-       unsigned char r;
 
        if (unlikely(pfn_is_io(pfn)))
                return 0;
 
        i = pfn >> PFNNID_SHIFT;
        BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-       r = pfnnid_map[i];
-       BUG_ON(r == 0xff);
 
-       return (int)r;
+       return (int)pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)
index 064015547d1e32be0a968ba946bc54f8db362895..cc2290a3cace1e0d65f8e69be0138c0c09cc8b2a 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
-
 #endif /* __ASSEMBLY__ */
 
 /*
 
 #ifndef __ASSEMBLY__
 
-/*
- * IRQ STACK - used for irq handler
- */
-#ifdef __KERNEL__
-
-#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
-
-union irq_stack_union {
-       unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
-};
-
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
-
-void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
-
-#endif /* __KERNEL__ */
-
 /*
  * Data detected about CPUs at boot time which is the same for all CPU's.
  * HP boxes are SMP - ie identical processors.
index 70c512a386f7df86d5f086553322d40b0baa894a..f866fff9a00411c7d45111bd9f780a53846615b5 100644 (file)
@@ -73,6 +73,8 @@
 
 #define SO_SELECT_ERR_QUEUE    0x4026
 
+#define SO_LL                  0x4027
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
index 5709c5e59be82c14c544bd06ad693c071a31c434..14285caec71a4360b5b061f87a43077675f0ce1e 100644 (file)
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
 static void setup_bus_id(struct parisc_device *padev)
 {
        struct hardware_path path;
-       char name[20];
+       char name[28];
        char *output = name;
        int i;
 
index 4bb96ad9b0b14ddd1cb066bc21b304d861059f38..e8f07dd2840186a934632104fdc3116fa7293506 100644 (file)
        rsm     PSW_SM_I, %r0   /* barrier for "Relied upon Translation */
        mtsp    %r0, %sr4
        mtsp    %r0, %sr5
-       mfsp    %sr7, %r1
-       or,=    %r0,%r1,%r0     /* Only save sr7 in sr3 if sr7 != 0 */
-       mtsp    %r1, %sr3
+       mtsp    %r0, %sr6
        tovirt_r1 %r29
        load32  KERNEL_PSW, %r1
 
        rsm     PSW_SM_QUIET,%r0        /* second "heavy weight" ctl op */
-       mtsp    %r0, %sr6
-       mtsp    %r0, %sr7
        mtctl   %r0, %cr17      /* Clear IIASQ tail */
        mtctl   %r0, %cr17      /* Clear IIASQ head */
        mtctl   %r1, %ipsw
 
        /* we save the registers in the task struct */
 
+       copy    %r30, %r17
        mfctl   %cr30, %r1
+       ldo     THREAD_SZ_ALGN(%r1), %r30
+       mtsp    %r0,%sr7
+       mtsp    %r16,%sr3
        tophys  %r1,%r9
        LDREG   TI_TASK(%r9), %r1       /* thread_info -> task_struct */
        tophys  %r1,%r9
        ldo     TASK_REGS(%r9),%r9
-       STREG   %r30, PT_GR30(%r9)
+       STREG   %r17,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
+       STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
-       mfctl   %cr30, %r1
-       ldo     THREAD_SZ_ALGN(%r1), %r30
        .endm
 
        .macro  get_stack_use_r30
        /* we put a struct pt_regs on the stack and save the registers there */
 
        tophys  %r30,%r9
-       STREG   %r30,PT_GR30(%r9)
+       copy    %r30,%r1
        ldo     PT_SZ_ALGN(%r30),%r30
+       STREG   %r1,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
+       STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm
 
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
 
+       /* Acquire pa_dbit_lock lock. */
+       .macro          dbit_lock       spc,tmp,tmp1
+#ifdef CONFIG_SMP
+       cmpib,COND(=),n 0,\spc,2f
+       load32          PA(pa_dbit_lock),\tmp
+1:     LDCW            0(\tmp),\tmp1
+       cmpib,COND(=)   0,\tmp1,1b
+       nop
+2:
+#endif
+       .endm
+
+       /* Release pa_dbit_lock lock without reloading lock address. */
+       .macro          dbit_unlock0    spc,tmp
+#ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       stw             \spc,0(\tmp)
+#endif
+       .endm
+
+       /* Release pa_dbit_lock lock. */
+       .macro          dbit_unlock1    spc,tmp
+#ifdef CONFIG_SMP
+       load32          PA(pa_dbit_lock),\tmp
+       dbit_unlock0    \spc,\tmp
+#endif
+       .endm
+
        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
-       .macro          update_ptep     ptep,pte,tmp,tmp1
+       .macro          update_ptep     spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       LDREG           0(\ptep),\pte
+#endif
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
 
        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
-       .macro          update_dirty    ptep,pte,tmp
+       .macro          update_dirty    spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       LDREG           0(\ptep),\pte
+#endif
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
        STREG           \pte,0(\ptep)
@@ -1111,11 +1148,13 @@ dtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1135,11 +1174,13 @@ nadtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1161,7 +1202,8 @@ dtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1172,6 +1214,7 @@ dtlb_miss_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1192,7 +1235,8 @@ nadtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1204,6 +1248,7 @@ nadtlb_miss_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1224,13 +1269,15 @@ dtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0
 
        idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1250,13 +1297,15 @@ nadtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0
        
         idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1357,11 +1406,13 @@ itlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,itlb_fault
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1379,11 +1430,13 @@ naitlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1405,7 +1458,8 @@ itlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1416,6 +1470,7 @@ itlb_miss_11:
        iitlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1427,7 +1482,8 @@ naitlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1438,6 +1494,7 @@ naitlb_miss_11:
        iitlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1459,13 +1516,15 @@ itlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0  
 
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1477,13 +1536,15 @@ naitlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0
 
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1507,29 +1568,13 @@ dbit_trap_20w:
 
        L3_ptep         ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nolock_20w
-       load32          PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
-       LDCW            0(t0),t1
-       cmpib,COND(=)         0,t1,dbit_spin_20w
-       nop
-
-dbit_nolock_20w:
-#endif
-       update_dirty    ptp,pte,t1
+       dbit_lock       spc,t0,t1
+       update_dirty    spc,ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
                
        idtlbt          pte,prot
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nounlock_20w
-       ldi             1,t1
-       stw             t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+       dbit_unlock0    spc,t0
 
        rfir
        nop
@@ -1543,18 +1588,8 @@ dbit_trap_11:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nolock_11
-       load32          PA(pa_dbit_lock),t0
-
-dbit_spin_11:
-       LDCW            0(t0),t1
-       cmpib,=         0,t1,dbit_spin_11
-       nop
-
-dbit_nolock_11:
-#endif
-       update_dirty    ptp,pte,t1
+       dbit_lock       spc,t0,t1
+       update_dirty    spc,ptp,pte,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1565,13 +1600,7 @@ dbit_nolock_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t1, %sr1     /* Restore sr1 */
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nounlock_11
-       ldi             1,t1
-       stw             t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+       dbit_unlock0    spc,t0
 
        rfir
        nop
@@ -1583,32 +1612,15 @@ dbit_trap_20:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nolock_20
-       load32          PA(pa_dbit_lock),t0
-
-dbit_spin_20:
-       LDCW            0(t0),t1
-       cmpib,=         0,t1,dbit_spin_20
-       nop
-
-dbit_nolock_20:
-#endif
-       update_dirty    ptp,pte,t1
+       dbit_lock       spc,t0,t1
+       update_dirty    spc,ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t1
        
         idtlbt          pte,prot
-
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nounlock_20
-       ldi             1,t1
-       stw             t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+       dbit_unlock0    spc,t0
 
        rfir
        nop
index f7752f6af29e090e559d74e0d8d1206bd50a72ef..9e2d2e408529f744b7bf5b7830cec15ad48a6012 100644 (file)
@@ -222,6 +222,7 @@ static struct hp_hardware hp_hardware_list[] = {
        {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
        {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
        {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+       {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"},
        {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
        {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
        {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
index e255db0bb7619cf92e8581cae017f01f20c4718d..2e6443b1e9228426ba94d8602c8956c5daec93c2 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <asm/io.h>
 
 #include <asm/smp.h>
+#include <asm/ldcw.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -166,22 +166,36 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_printf(p, "%*s: ", prec, "STK");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-       seq_printf(p, "  Kernel stack usage\n");
+       seq_puts(p, "  Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+       seq_printf(p, "%*s: ", prec, "IST");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+       seq_puts(p, "  Interrupt stack usage\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-       seq_printf(p, "  Rescheduling interrupts\n");
+       seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-       seq_printf(p, "  Function call interrupts\n");
+       seq_puts(p, "  Function call interrupts\n");
 #endif
+       seq_printf(p, "%*s: ", prec, "UAH");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+       seq_puts(p, "  Unaligned access handler traps\n");
+       seq_printf(p, "%*s: ", prec, "FPA");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+       seq_puts(p, "  Floating point assist traps\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-       seq_printf(p, "  TLB shootdowns\n");
+       seq_puts(p, "  TLB shootdowns\n");
        return 0;
 }
 
@@ -366,6 +380,24 @@ static inline int eirr_to_irq(unsigned long eirr)
        return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
 
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+       unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+       volatile unsigned int slock[4];
+       volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+               .slock = { 1,1,1,1 },
+       };
+#endif
+
+
 int sysctl_panic_on_stackoverflow = 1;
 
 static inline void stack_overflow_check(struct pt_regs *regs)
@@ -378,6 +410,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        unsigned long sp = regs->gr[30];
        unsigned long stack_usage;
        unsigned int *last_usage;
+       int cpu = smp_processor_id();
 
        /* if sr7 != 0, we interrupted a userspace process which we do not want
         * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +419,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 
        /* calculate kernel stack usage */
        stack_usage = sp - stack_start;
-       last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+       if (likely(stack_usage <= THREAD_SIZE))
+               goto check_kernel_stack; /* found kernel stack */
+
+       /* check irq stack usage */
+       stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+       stack_usage = sp - stack_start;
+
+       last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+       if (unlikely(stack_usage > *last_usage))
+               *last_usage = stack_usage;
+
+       if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+               return;
+
+       pr_emerg("stackcheck: %s will most likely overflow irq stack "
+                "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+               current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+       goto panic_check;
+
+check_kernel_stack:
+#endif
+
+       /* check kernel stack usage */
+       last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
 
        if (unlikely(stack_usage > *last_usage))
                *last_usage = stack_usage;
@@ -398,31 +455,66 @@ static inline void stack_overflow_check(struct pt_regs *regs)
                 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
                current->comm, sp, stack_start, stack_start + THREAD_SIZE);
 
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
 #endif
 }
 
 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
 
 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
-       unsigned long *irq_stack_start;
+       union irq_stack_union *union_ptr;
        unsigned long irq_stack;
-       int cpu = smp_processor_id();
+       volatile unsigned int *irq_stack_in_use;
+
+       union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+       irq_stack = (unsigned long) &union_ptr->stack;
+       irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+                        64); /* align for stack frame usage */
 
-       irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
-       irq_stack = (unsigned long) irq_stack_start;
-       irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+       /* We may be called recursively. If we are already using the irq stack,
+        * just continue to use it. Use spinlocks to serialize
+        * the irq stack usage.
+        */
+       irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+       if (!__ldcw(irq_stack_in_use)) {
+               void (*direct_call)(unsigned long p1) = func;
 
-       BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
-       *irq_stack_start = 1;
+               /* We are using the IRQ stack already.
+                * Do direct call on current stack. */
+               direct_call(param1);
+               return;
+       }
 
        /* This is where we switch to the IRQ stack. */
        call_on_stack(param1, func, irq_stack);
 
-       *irq_stack_start = 0;
+       /* free up irq stack usage. */
+       *irq_stack_in_use = 1;
+}
+
+asmlinkage void do_softirq(void)
+{
+       __u32 pending;
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       pending = local_softirq_pending();
+
+       if (pending)
+               execute_on_irq_stack(__do_softirq, 0);
+
+       local_irq_restore(flags);
 }
 #endif /* CONFIG_IRQSTACKS */
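The ldcw-based guard above replaces the old BUG_ON() recursion check: if the per-CPU IRQ stack is already claimed, the handler simply runs on the current stack instead of switching again. Below is a minimal userspace sketch of that try-lock-or-run-inline pattern, using a C11 atomic flag in place of the ldcw semaphore used in the patch; the names and the flag itself are illustrative, not taken from the patch.

#include <stdatomic.h>
#include <stdio.h>

/* One "in use" flag guards the (hypothetical) IRQ stack.  If the flag is
 * already taken we are being re-entered while running on that stack, so
 * the handler is invoked on the current stack instead. */
static atomic_flag irq_stack_in_use = ATOMIC_FLAG_INIT;

static void handler(unsigned long arg)
{
        printf("handler(%lu)\n", arg);
}

static void run_on_irq_stack(void (*func)(unsigned long), unsigned long arg)
{
        if (atomic_flag_test_and_set(&irq_stack_in_use)) {
                /* Recursion: already on the IRQ stack, call directly. */
                func(arg);
                return;
        }
        /* The real code switches the stack pointer to the per-CPU IRQ
         * stack here via call_on_stack(); the sketch just makes the call. */
        func(arg);
        atomic_flag_clear(&irq_stack_in_use);
}

int main(void)
{
        run_on_irq_stack(handler, 1);
        return 0;
}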
 
index 5e1de6072be57f0c92ce04950284fe7f10c5bf4c..36d7f402e48edb8b8cb13dea9e302d1e84edfdf9 100644 (file)
@@ -605,14 +605,14 @@ ENTRY(copy_user_page_asm)
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        convert_phys_for_tlb_insert20 %r23      /* convert phys addr to tlb insert format */
        depd            %r24,63,22, %r28        /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
+       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
        copy            %r28, %r29
        depdi           1, 41,1, %r29           /* Form aliased virtual address 'from' */
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        extrw,u         %r23, 24,25, %r23       /* convert phys addr to tlb insert format */
        depw            %r24, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
        copy            %r28, %r29
        depwi           1, 9,1, %r29            /* Form aliased virtual address 'from' */
 #endif
@@ -762,7 +762,7 @@ ENTRY(clear_user_page_asm)
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #endif
 
        /* Purge any old translation */
@@ -846,7 +846,7 @@ ENTRY(flush_dcache_page_asm)
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #endif
 
        /* Purge any old translation */
@@ -918,11 +918,11 @@ ENTRY(flush_icache_page_asm)
 #endif
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
+       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #endif
 
        /* Purge any old translation */
index 76b63e726a539ee912bea1077d00370603e1f538..1e95b2000ce85650903e85a705206ed113540670 100644 (file)
@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
                /* called from hpux boot loader */
                boot_command_line[0] = '\0';
        } else {
-               strcpy(boot_command_line, (char *)__va(boot_args[1]));
+               strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+                       COMMAND_LINE_SIZE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
                if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
index fe41a98043bbcf287e3ee3562972d6b693e140d0..04e47c6a45626347aa261d3725005cdafb9385ad 100644 (file)
@@ -646,6 +646,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
+               __inc_irq_stat(irq_fpassist_count);
                handle_fpe(regs);
                return;
 
index 234e3682cf0900ef0e55e18e168d1ad5324afd5f..d7c0acb35ec248c51329a5cb4594189bd5f5040f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/signal.h>
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
+#include <asm/hardirq.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
@@ -454,6 +455,8 @@ void handle_unaligned(struct pt_regs *regs)
        struct siginfo si;
        register int flop=0;    /* true if this is a flop */
 
+       __inc_irq_stat(irq_unaligned_count);
+
        /* log a message with pacing */
        if (user_mode(regs)) {
                if (current->thread.flags & PARISC_UAC_SIGBUS) {
index ce939ac8622b84b7278f9e979e68a56354267adf..1c965642068b48d6b7102a5b6adb365f321f73e6 100644 (file)
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
 {
        int do_recycle;
 
-       inc_irq_stat(irq_tlb_count);
+       __inc_irq_stat(irq_tlb_count);
        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
-       inc_irq_stat(irq_tlb_count);
+       __inc_irq_stat(irq_tlb_count);
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
index c33e3ad2c8fd52c9e0c31dfc272faf3d34902f37..7754c6b8c453602d890c47d41f005798a6e8b62d 100644 (file)
@@ -674,7 +674,6 @@ config SBUS
 
 config FSL_SOC
        bool
-       select HAVE_CAN_FLEXCAN if NET && CAN
 
 config FSL_PCI
        bool
index 5416e28a753871ec02c75503e9583e8ff5abfbe9..863d877e0b5f7444f31239a2412bdec68165abea 100644 (file)
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
          Select this to enable early debugging for the PowerNV platform
          using an "hvsi" console
 
+config PPC_EARLY_DEBUG_MEMCONS
+       bool "In memory console"
+       help
+         Select this to enable early debugging using an in memory console.
+         This console provides input and output buffers stored within the
+         kernel BSS and should be safe to select on any system. A debugger
+         can then be used to read kernel output or send input to the console.
 endchoice
 
+config PPC_MEMCONS_OUTPUT_SIZE
+       int "In memory console output buffer size"
+       depends on PPC_EARLY_DEBUG_MEMCONS
+       default 4096
+       help
+         Selects the size of the output buffer (in bytes) of the in memory
+         console.
+
+config PPC_MEMCONS_INPUT_SIZE
+       int "In memory console input buffer size"
+       depends on PPC_EARLY_DEBUG_MEMCONS
+       default 128
+       help
+         Selects the size of the input buffer (in bytes) of the in memory
+         console.
+
 config PPC_EARLY_DEBUG_OPAL
        def_bool y
        depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
index f79196232917140f0afef3085f53d63e32f5e5d7..139a8308070c8fee09665b81d168ef092e584d3e 100644 (file)
@@ -136,7 +136,6 @@ CONFIG_HID_SMARTJOYPLUS=m
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=m
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=m
 CONFIG_USB_EHCI_HCD=m
 # CONFIG_USB_EHCI_HCD_PPC_OF is not set
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644 (file)
index 0000000..b6f5a33
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl       .schedule_user
+#else
+#define SCHEDULE_USER bl       .schedule
+#endif
+
+#endif
index 26807e5aff5174142ac7ad804cde073b5ba0fc2c..6f3887d884d2a56b538e398a9216da62ef86370b 100644 (file)
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_CFAR                   LONG_ASM_CONST(0x0100000000000000)
 #define        CPU_FTR_HAS_PPR                 LONG_ASM_CONST(0x0200000000000000)
 #define CPU_FTR_DAWR                   LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX                  LONG_ASM_CONST(0x0800000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
            CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
-           CPU_FTR_HVMODE)
+           CPU_FTR_HVMODE | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER5        (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
-           CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+           CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | \
            CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
            CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-           CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+           CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+           CPU_FTR_DABRX)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
            CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
            CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
-           CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+           CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_PAUSE_ZERO  | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
-           CPU_FTR_UNALIGNED_LD_STD)
+           CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
-           CPU_FTR_PURR | CPU_FTR_REAL_LE)
+           CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
 #define CPU_FTRS_COMPATIBLE    (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
-                    CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+                    CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+                    CPU_FTR_ICSWX | CPU_FTR_DABRX )
 
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_BOOK3E
index 8e5fae8beaf6ad0958e8188d8094ded008033425..46793b58a761d549d7bf69814530c128352f26a5 100644 (file)
@@ -513,7 +513,7 @@ label##_common:                                                     \
  */
 #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)            \
        EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
-                        FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+                        FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
 
 /*
  * When the idle code in power4_idle puts the CPU into NAP mode,
index 0df54646f9689d867ff86d3b8f515af2dfa1bfe6..681bc0314b6bb999070d59a3f86955d2bf42dd15 100644 (file)
@@ -52,6 +52,7 @@
 #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN                ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3      ASM_CONST(0x0000000400000000)
 
 #ifndef __ASSEMBLY__
 
@@ -69,7 +70,8 @@ enum {
                FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
                FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
        FW_FEATURE_PSERIES_ALWAYS = 0,
-       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
+       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+               FW_FEATURE_OPALv3,
        FW_FEATURE_POWERNV_ALWAYS = 0,
        FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
        FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
index cf4df8e2139af0a1a0c083ba3ad2618b2efc57c4..0c7f2bfcf1348100fb10c4cd6f74dcee34e42756 100644 (file)
 #define H_GET_MPP              0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY          0x2F4
+#define H_XIRR_X               0x2FC
 #define H_RANDOM               0x300
 #define H_COP                  0x304
 #define H_GET_MPP_X            0x314
index d615b28dda82ff7e9afed97ddf51815f5d170cca..ba713f166fa57fb2f9b08002a8a5cf8c0162d196 100644 (file)
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
 #endif
 
 #define hard_irq_disable()     do {                    \
+       u8 _was_enabled = get_paca()->soft_enabled;     \
        __hard_irq_disable();                           \
-       if (local_paca->soft_enabled)                   \
-               trace_hardirqs_off();                   \
        get_paca()->soft_enabled = 0;                   \
        get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;  \
+       if (_was_enabled)                               \
+               trace_hardirqs_off();                   \
 } while(0)
 
 static inline bool lazy_irq_pending(void)
index b9dd382cb349bd651f214eae662f13556f6c8b5a..851bac7afa4b26e25a860be03ab7fe0783541fca 100644 (file)
 #define BOOKE_INTERRUPT_DEBUG 15
 
 /* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+                               BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
 #define BOOKE_INTERRUPT_HV_SYSCALL 40
 #define BOOKE_INTERRUPT_HV_PRIV 41
 
-/* altivec */
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
-
 /* book3s */
 
 #define BOOK3S_INTERRUPT_SYSTEM_RESET  0x100
index b6c8b58b1d764c5d1d8255fbc62df86b6b88c83e..cbb9305ab15affb2035f723be351e3a7431e2955 100644 (file)
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {
 
 enum OpalThreadStatus {
        OPAL_THREAD_INACTIVE = 0x0,
-       OPAL_THREAD_STARTED = 0x1
+       OPAL_THREAD_STARTED = 0x1,
+       OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
 };
 
 enum OpalPciBusCompare {
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);
 
 extern int opal_machine_check(struct pt_regs *regs);
 
+extern void opal_shutdown(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
index 8b11b5bd9938b847a5439eb5e13851653af9fe25..2c1d8cb9b26562a295ab6b6016261b0d692df30b 100644 (file)
@@ -174,6 +174,8 @@ struct pci_dn {
 /* Get the pointer to a device_node's pci_dn */
 #define PCI_DN(dn)     ((struct pci_dn *) (dn)->data)
 
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+
 extern void * update_dn_pci_info(struct device_node *dn, void *data);
 
 static inline int pci_device_from_OF_node(struct device_node *np,
index 91acb12bac9280a2a8068cc9cd46176f5cbd1243..b66ae722a8e9c2d9aacb9ef7eb49e553d9ad9dde 100644 (file)
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 static inline pgtable_t pmd_pgtable(pmd_t pmd)
 {
-       return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
+       return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
index cea8496091ffbd1cb2437061cb1f7772f3b8bbc3..2f1b6c5f8174f4a4759086f21668dda5c8477b1d 100644 (file)
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define PPC440EP_ERR42
 #endif
 
+/* The following stops all load and store data streams associated with stream
+ * ID (i.e. streams created explicitly).  The embedded and server mnemonics for
+ * dcbt are different, so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch)      \
+.machine push ;                                        \
+.machine "power4" ;                            \
+       lis     scratch,0x60000000@h;           \
+       dcbt    r0,scratch,0b01010;             \
+.machine pop
+
 /*
  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
  * keep the address intact to be compatible with code shared with
index d7e67ca8b4a6fb48f26b7a778be4db8873862012..14a658363698ee1d58e0b400b33b3bbc13475dda 100644 (file)
@@ -284,6 +284,12 @@ struct thread_struct {
        unsigned long   ebbrr;
        unsigned long   ebbhr;
        unsigned long   bescr;
+       unsigned long   siar;
+       unsigned long   sdar;
+       unsigned long   sier;
+       unsigned long   mmcr0;
+       unsigned long   mmcr2;
+       unsigned long   mmcra;
 #endif
 };
 
@@ -403,21 +409,16 @@ static inline void prefetchw(const void *x)
 #endif
 
 #ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-       unsigned long sp;
-
        if (is_32)
-               sp = regs->gpr[1] & 0x0ffffffffUL;
-       else
-               sp = regs->gpr[1];
-
+               return sp & 0x0ffffffffUL;
        return sp;
 }
 #else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-       return regs->gpr[1];
+       return sp;
 }
 #endif
 
index 3e13e23e4fdf8a4e3fc84e07ef091cffdfc0492f..d836d945068d032cb072013cf027f7a43e36b88a 100644 (file)
@@ -47,7 +47,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)        ((real_pte_t) { \
-                       (e), ((e) & _PAGE_COMBO) ? \
+                       (e), (pte_val(e) & _PAGE_COMBO) ? \
                                (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)        ((pte_val((r).pte) & _PAGE_COMBO) ? \
         (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
index a6136515c7f2952d82843eb47586901b3747f22f..4a9e408644fe6ae0403d49821d48516092e51640 100644 (file)
 #define MSR_TM_TRANSACTIONAL(x)        (((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)    (((x) & MSR_TS_MASK) == MSR_TS_S)
 
-/* Reason codes describing kernel causes for transaction aborts.  By
-   convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
-   the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED       0xfe
-#define TM_CAUSE_TLBI          0xfc
-#define TM_CAUSE_FAC_UNAV      0xfa
-#define TM_CAUSE_SYSCALL       0xf9 /* Persistent */
-#define TM_CAUSE_MISC          0xf6
-#define TM_CAUSE_SIGNAL                0xf4
-
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT      MSR_SF
 
index a8bc2bb4adc97147b8a4d95b07ea54bef1016567..34fd70488d83f09ebb947680763ddbcf4ce62fad 100644 (file)
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
 extern void rtas_initialize(void);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(struct rtas_args *);
 
 struct rtc_time;
index fbe66c463891417b846864077732eda44eed68f6..9322c28aebd2b89ac193c8470ca41c33b62730e3 100644 (file)
@@ -3,5 +3,8 @@
 
 #define __ARCH_HAS_SA_RESTORER
 #include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
 
 #endif /* _ASM_POWERPC_SIGNAL_H */
index 8ceea14d6fe44a20d0d807e28a4ca63477f18863..ba7b1973866e1933486f47ed59e3aa0efe799f34 100644 (file)
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_PERFMON_CTXSW      6       /* perfmon needs ctxsw calls */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SINGLESTEP         8       /* singlestepping active */
-#define TIF_MEMDIE             9       /* is terminating due to OOM killer */
+#define TIF_NOHZ               9       /* in adaptive nohz mode */
 #define TIF_SECCOMP            10      /* secure computing */
 #define TIF_RESTOREALL         11      /* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR            12      /* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACEPOINT 15      /* syscall tracepoint instrumentation */
 #define TIF_EMULATE_STACK_STORE        16      /* Is an instruction emulation
                                                for stack store? */
+#define TIF_MEMDIE             17      /* is terminating due to OOM killer */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_UPROBE            (1<<TIF_UPROBE)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE       (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ              (1<<TIF_NOHZ)
 #define _TIF_SYSCALL_T_OR_A    (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+                                _TIF_NOHZ)
 
 #define _TIF_USER_WORK_MASK    (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                                 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
index 4b4449abf3f854d8a29a1bed14fb273bf94f612b..9dfbc34bdbf5e7e86eda005d4e974f5fe3ebd12a 100644 (file)
@@ -5,6 +5,8 @@
  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
  */
 
+#include <uapi/asm/tm.h>
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
index 5a7510e9d09d3865619b7a874efa7584074b5f7a..dc590919f8eb370d1568d54f7c002d58f63b2dad 100644 (file)
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
 extern void __init udbg_init_cpm(void);
 extern void __init udbg_init_usbgecko(void);
 extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
 extern void __init udbg_init_ehv_bc(void);
 extern void __init udbg_init_ps3gelic(void);
 extern void __init udbg_init_debug_opal_raw(void);
index f7bca6370745b49133a771d9aff43a742116b229..5182c8622b54eea94ba18cfad7236872ae056865 100644 (file)
@@ -40,6 +40,7 @@ header-y += statfs.h
 header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
+header-y += tm.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
index a36daf3c6f9a31faac9e922d0c77b4c5fe412c37..405fb09bda94184de0876d46fb9ee3618d5bece8 100644 (file)
@@ -81,4 +81,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644 (file)
index 0000000..85059a0
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts.  By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent.  PAPR saves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT    0x01
+#define TM_CAUSE_RESCHED       0xde
+#define TM_CAUSE_TLBI          0xdc
+#define TM_CAUSE_FAC_UNAV      0xda
+#define TM_CAUSE_SYSCALL       0xd8  /* future use */
+#define TM_CAUSE_MISC          0xd6  /* future use */
+#define TM_CAUSE_SIGNAL                0xd4
+#define TM_CAUSE_ALIGNMENT     0xd2
+#define TM_CAUSE_EMULATE       0xd0
+
+#endif
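These cause codes are reported back in the TEXASR failure-code byte when the kernel aborts a transaction, and by the convention described above bit 0 marks a persistent failure. A hedged userspace sketch of how a handler might use that bit to decide whether a retry is worthwhile; the helper name and the already-extracted cause byte are assumptions, not part of this header.

#include <stdio.h>

/* Values mirror the new uapi/asm/tm.h definitions above. */
#define TM_CAUSE_PERSISTENT    0x01
#define TM_CAUSE_RESCHED       0xde

/* Hypothetical helper: retrying only makes sense when the kernel did
 * not flag the failure as persistent. */
static int worth_retrying(unsigned int cause)
{
        return !(cause & TM_CAUSE_PERSISTENT);
}

int main(void)
{
        printf("resched: retry=%d\n", worth_retrying(TM_CAUSE_RESCHED));
        printf("resched|persistent: retry=%d\n",
               worth_retrying(TM_CAUSE_RESCHED | TM_CAUSE_PERSISTENT));
        return 0;
}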
index b51a97cfedf88ff2b7884ef19485ff081d18cf78..6f16ffafa6f01542d54ccc684c3f8d5d7d6add5c 100644 (file)
@@ -127,6 +127,12 @@ int main(void)
        DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
        DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
        DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
+       DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
+       DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
+       DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
+       DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
+       DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
+       DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
index a283b6442b266544235e11bc061d33d58f321b05..18b5b9cf8e3730f4608ca6ff997810d5cc47df9a 100644 (file)
@@ -135,8 +135,12 @@ __init_HFSCR:
        blr
 
 __init_TLB:
-       /* Clear the TLB */
-       li      r6,128
+       /*
+        * Clear the TLB using the "IS 3" form of tlbiel instruction
+        * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
+        * so we just always do 512
+        */
+       li      r6,512
        mtctr   r6
        li      r7,0xc00        /* IS field = 0b11 */
        ptesync
index c60bbec25c1fe98ca73581ef576d9785c2561a73..2a45d0f043852a33cc41826c7835c59471b1ed60 100644 (file)
@@ -452,7 +452,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .mmu_features           = MMU_FTRS_POWER8,
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
-               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .oprofile_type          = PPC_OPROFILE_INVALID,
                .oprofile_cpu_type      = "ppc64/ibm-compat-v1",
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_name               = "POWER7+ (raw)",
                .cpu_features           = CPU_FTRS_POWER7,
                .cpu_user_features      = COMMON_USER_POWER7,
-               .cpu_user_features      = COMMON_USER2_POWER7,
+               .cpu_user_features2     = COMMON_USER2_POWER7,
                .mmu_features           = MMU_FTRS_POWER7,
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
@@ -507,7 +507,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .num_pmcs               = 6,
                .pmc_type               = PPC_PMC_IBM,
                .oprofile_cpu_type      = "ppc64/power8",
-               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
                .platform               = "power8",
index e514de57a125333a4ce174cb399070b6cc62e3b4..22b45a4955cd8f03a26c89a7fb2ab79252f2b6d4 100644 (file)
@@ -439,8 +439,6 @@ ret_from_fork:
 ret_from_kernel_thread:
        REST_NVGPRS(r1)
        bl      schedule_tail
-       li      r3,0
-       stw     r3,0(r1)
        mtlr    r14
        mr      r3,r15
        PPC440EP_ERR42
@@ -851,7 +849,7 @@ resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r8,TI_FLAGS(r9)
-       andis.  r8,r8,_TIF_EMULATE_STACK_STORE@h
+       andis.  r0,r8,_TIF_EMULATE_STACK_STORE@h
        beq+    1f
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
index 915fbb4fc2fe4e7534dbf78eb39317013b03b014..8741c854e03d50800cba18ad091f3d843c6a0111 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/irqflags.h>
 #include <asm/ftrace.h>
 #include <asm/hw_irq.h>
+#include <asm/context_tracking.h>
 
 /*
  * System calls.
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork)
 _GLOBAL(ret_from_kernel_thread)
        bl      .schedule_tail
        REST_NVGPRS(r1)
-       li      r3,0
-       std     r3,0(r1)
        ld      r14, 0(r14)
        mtlr    r14
        mr      r3,r15
@@ -488,6 +487,13 @@ BEGIN_FTR_SECTION
        ldarx   r6,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+       DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
        std     r6,PACACURRENT(r13)     /* Set new 'current' */
 
@@ -634,7 +640,7 @@ _GLOBAL(ret_from_except_lite)
        andi.   r0,r4,_TIF_NEED_RESCHED
        beq     1f
        bl      .restore_interrupts
-       bl      .schedule
+       SCHEDULE_USER
        b       .ret_from_except_lite
 
 1:     bl      .save_nvgprs
index 42a756eec9ff7ea6a240bbef197e6519b1abd008..645170a07ada1da7abf4fddc814fcbaad9c071c9 100644 (file)
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         */
 
        mfspr   r14,SPRN_DBSR           /* check single-step/branch taken */
-       andis.  r15,r14,DBSR_IC@h
+       andis.  r15,r14,(DBSR_IC|DBSR_BT)@h
        beq+    1f
 
        LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        bge+    cr1,1f
 
        /* here it looks like we got an inappropriate debug exception. */
-       lis     r14,DBSR_IC@h           /* clear the IC event */
+       lis     r14,(DBSR_IC|DBSR_BT)@h         /* clear the event */
        rlwinm  r11,r11,0,~MSR_DE       /* clear DE in the CSRR1 value */
        mtspr   SPRN_DBSR,r14
        mtspr   SPRN_CSRR1,r11
@@ -555,7 +555,7 @@ kernel_dbg_exc:
         */
 
        mfspr   r14,SPRN_DBSR           /* check single-step/branch taken */
-       andis.  r15,r14,DBSR_IC@h
+       andis.  r15,r14,(DBSR_IC|DBSR_BT)@h
        beq+    1f
 
        LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -566,7 +566,7 @@ kernel_dbg_exc:
        bge+    cr1,1f
 
        /* here it looks like we got an inappropriate debug exception. */
-       lis     r14,DBSR_IC@h           /* clear the IC event */
+       lis     r14,(DBSR_IC|DBSR_BT)@h         /* clear the event */
        rlwinm  r11,r11,0,~MSR_DE       /* clear DE in the DSRR1 value */
        mtspr   SPRN_DBSR,r14
        mtspr   SPRN_DSRR1,r11
index e6eba1bf61ad55de486cd19f725743493e64cd84..40e4a17c8ba0f249e2b65d85d4014fe7aa47f7a7 100644 (file)
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
        xori    r10,r10,(MSR_FE0|MSR_FE1)
        mtmsrd  r10
        sync
-       fmr     0,0
-       fmr     1,1
-       fmr     2,2
-       fmr     3,3
-       fmr     4,4
-       fmr     5,5
-       fmr     6,6
-       fmr     7,7
-       fmr     8,8
-       fmr     9,9
-       fmr     10,10
-       fmr     11,11
-       fmr     12,12
-       fmr     13,13
-       fmr     14,14
-       fmr     15,15
-       fmr     16,16
-       fmr     17,17
-       fmr     18,18
-       fmr     19,19
-       fmr     20,20
-       fmr     21,21
-       fmr     22,22
-       fmr     23,23
-       fmr     24,24
-       fmr     25,25
-       fmr     26,26
-       fmr     27,27
-       fmr     28,28
-       fmr     29,29
-       fmr     30,30
-       fmr     31,31
+
+#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n)  FMR2(n) ; FMR2(n+2)
+#define FMR8(n)  FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+       FMR32(0)
+
 FTR_SECTION_ELSE
 /*
  * To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
        oris    r10,r10,MSR_VSX@h
        mtmsrd  r10
        sync
-       XVCPSGNDP(0,0,0)
-       XVCPSGNDP(1,1,1)
-       XVCPSGNDP(2,2,2)
-       XVCPSGNDP(3,3,3)
-       XVCPSGNDP(4,4,4)
-       XVCPSGNDP(5,5,5)
-       XVCPSGNDP(6,6,6)
-       XVCPSGNDP(7,7,7)
-       XVCPSGNDP(8,8,8)
-       XVCPSGNDP(9,9,9)
-       XVCPSGNDP(10,10,10)
-       XVCPSGNDP(11,11,11)
-       XVCPSGNDP(12,12,12)
-       XVCPSGNDP(13,13,13)
-       XVCPSGNDP(14,14,14)
-       XVCPSGNDP(15,15,15)
-       XVCPSGNDP(16,16,16)
-       XVCPSGNDP(17,17,17)
-       XVCPSGNDP(18,18,18)
-       XVCPSGNDP(19,19,19)
-       XVCPSGNDP(20,20,20)
-       XVCPSGNDP(21,21,21)
-       XVCPSGNDP(22,22,22)
-       XVCPSGNDP(23,23,23)
-       XVCPSGNDP(24,24,24)
-       XVCPSGNDP(25,25,25)
-       XVCPSGNDP(26,26,26)
-       XVCPSGNDP(27,27,27)
-       XVCPSGNDP(28,28,28)
-       XVCPSGNDP(29,29,29)
-       XVCPSGNDP(30,30,30)
-       XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+       XVCPSGNDP32(0)
+
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+       b       denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+       XVCPSGNDP32(32)
+denorm_done:
        mtspr   SPRN_HSRR0,r11
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
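The FMR*/XVCPSGNDP* helpers above generate 32 (and, for POWER8, 64) consecutive register self-moves by doubling at each macro level instead of spelling every instruction out. The same doubling trick works in any macro processor; here is a small, purely illustrative C-preprocessor sketch that expands to 32 statements (not kernel code).

#include <stdio.h>

/* Each level emits two copies of the level below, so SAY32(0)
 * expands to 32 consecutive statements covering 0..31. */
#define SAY2(n)  printf("reg %d\n", (n)); printf("reg %d\n", (n) + 1);
#define SAY4(n)  SAY2(n)  SAY2((n) + 2)
#define SAY8(n)  SAY4(n)  SAY4((n) + 4)
#define SAY16(n) SAY8(n)  SAY8((n) + 8)
#define SAY32(n) SAY16(n) SAY16((n) + 16)

int main(void)
{
        SAY32(0)        /* prints "reg 0" through "reg 31" */
        return 0;
}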
@@ -721,7 +683,7 @@ machine_check_common:
        STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
        STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
        STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-       STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+       STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
        STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
 #ifdef CONFIG_PPC_DOORBELL
        STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
index 5cbcf4d5a808db3f2d25fdc4fe01ea26e3d0f247..ea185e0b3cae5800145b0e3ee0388534369f3686 100644 (file)
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
         * in case we also had a rollover while hard disabled
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
-       if (decrementer_check_overflow())
+       if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
                return 0x900;
 
        /* Finally check if an external interrupt happened */
index 466a2908bb634506e034cf63ab1da77b2036d95c..611acdf30096643a1d94bc3111b685e28d290453 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/cpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
        pr_debug("kexec: Starting switchover sequence.\n");
 
        /* switch to a staticly allocated stack.  Based on irq stack code.
+        * We setup preempt_count to avoid using VMX in memcpy.
         * XXX: the task struct will likely be invalid once we do the copy!
         */
        kexec_stack.thread_info.task = current_thread_info()->task;
        kexec_stack.thread_info.flags = 0;
+       kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+       kexec_stack.thread_info.cpu = current_thread_info()->cpu;
 
        /* We need a static PACA, too; copy this CPU's PACA over and switch to
         * it.  Also poison per_cpu_offset to catch anyone using non-static
index 19e096bd0e73d8b9a530159a4b9b440efc25403f..e469f30e6eeb88b4668e11a86d287e99b67aeae2 100644 (file)
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
        li      r3,2
        blr
 
+_GLOBAL(__bswapdi2)
+       rotlwi  r9,r4,8
+       rotlwi  r10,r3,8
+       rlwimi  r9,r4,24,0,7
+       rlwimi  r10,r3,24,0,7
+       rlwimi  r9,r4,24,16,23
+       rlwimi  r10,r3,24,16,23
+       mr      r3,r9
+       mr      r4,r10
+       blr
+
 _GLOBAL(abs)
        srawi   r4,r3,31
        xor     r3,r3,r4
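The routine above provides __bswapdi2, the libgcc helper gcc emits for a 64-bit byte swap when no native pattern is available; a second variant for 64-bit kernels follows in the next hunk. For reference, a hedged, portable C equivalent of what the helper must compute:

#include <stdint.h>
#include <stdio.h>

/* Full byte reversal of a 64-bit value: swap 32-bit halves, then
 * 16-bit quarters, then individual bytes. */
static uint64_t bswapdi2(uint64_t x)
{
        x = (x << 32) | (x >> 32);
        x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x >> 16) & 0x0000ffff0000ffffULL);
        x = ((x & 0x00ff00ff00ff00ffULL) << 8)  | ((x >> 8)  & 0x00ff00ff00ff00ffULL);
        return x;
}

int main(void)
{
        /* expected output: efcdab8967452301 */
        printf("%016llx\n", (unsigned long long)bswapdi2(0x0123456789abcdefULL));
        return 0;
}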
index 5cfa8008693b83aaae96dd4f5d91a7e8190d234b..6820e45f557b73b848fda82c512332752b760dc2 100644 (file)
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
        isync
        blr
 
+_GLOBAL(__bswapdi2)
+       srdi    r8,r3,32
+       rlwinm  r7,r3,8,0xffffffff
+       rlwimi  r7,r3,24,0,7
+       rlwinm  r9,r8,8,0xffffffff
+       rlwimi  r7,r3,24,16,23
+       rlwimi  r9,r8,24,0,7
+       rlwimi  r9,r8,24,16,23
+       sldi    r7,r7,32
+       or      r3,r7,r9
+       blr
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
index f5c5c90799a763eb2a188316ea95e5d761ffe232..eabeec991016b0c6dbbc5d3c4b40d8cf30bfecfb 100644 (file)
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
                                      enum pci_mmap_state mmap_state,
                                      int write_combine)
 {
-       unsigned long prot = pgprot_val(protection);
 
        /* Write combine is always 0 on non-memory space mappings. On
         * memory space, if the user didn't pass 1, we check for a
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 
        /* XXX would be nice to have a way to ask for write-through */
        if (write_combine)
-               return pgprot_noncached_wc(prot);
+               return pgprot_noncached_wc(protection);
        else
-               return pgprot_noncached(prot);
+               return pgprot_noncached(protection);
 }
 
 /*
@@ -658,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
  *     ranges. However, some machines (thanks Apple !) tend to split their
  *     space into lots of small contiguous ranges. So we have to coalesce.
  *
- *   - We can only cope with all memory ranges having the same offset
- *     between CPU addresses and PCI addresses. Unfortunately, some bridges
- *     are setup for a large 1:1 mapping along with a small "window" which
- *     maps PCI address 0 to some arbitrary high address of the CPU space in
- *     order to give access to the ISA memory hole.
- *     The way out of here that I've chosen for now is to always set the
- *     offset based on the first resource found, then override it if we
- *     have a different offset and the previous was set by an ISA hole.
- *
  *   - Some busses have IO space not starting at 0, which causes trouble with
  *     the way we do our IO resource renumbering. The code somewhat deals with
  *     it for 64 bits but I would expect problems on 32 bits.
@@ -681,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
        int rlen;
        int pna = of_n_addr_cells(dev);
        int np = pna + 5;
-       int memno = 0, isa_hole = -1;
+       int memno = 0;
        u32 pci_space;
        unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
-       unsigned long long isa_mb = 0;
        struct resource *res;
 
        printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -778,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        }
                        /* Handles ISA memory hole space here */
                        if (pci_addr == 0) {
-                               isa_mb = cpu_addr;
-                               isa_hole = memno;
                                if (primary || isa_mem_base == 0)
                                        isa_mem_base = cpu_addr;
                                hose->isa_mem_phys = cpu_addr;
@@ -840,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
        }
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                struct resource *res = dev->resource + i;
+               struct pci_bus_region reg;
                if (!res->flags)
                        continue;
 
@@ -848,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
                 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
                 * since in that case, we don't want to re-assign anything
                 */
+               pcibios_resource_to_bus(dev, &reg, res);
                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
-                   (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+                   (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
                        /* Only print message if not re-assigning */
                        if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
                                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
@@ -1521,9 +1510,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
        for (i = 0; i < 3; ++i) {
                res = &hose->mem_resources[i];
                if (!res->flags) {
-                       printk(KERN_ERR "PCI: Memory resource 0 not set for "
-                              "host bridge %s (domain %d)\n",
-                              hose->dn->full_name, hose->global_number);
+                       if (i == 0)
+                               printk(KERN_ERR "PCI: Memory resource 0 not set for "
+                                      "host bridge %s (domain %d)\n",
+                                      hose->dn->full_name, hose->global_number);
                        continue;
                }
                offset = hose->mem_offset[i];
index 873050d268406e4a343d2e597c80ec7d99795552..2e8629654ca872443e89e30455b05f54c1850eb0 100644 (file)
@@ -266,3 +266,13 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
+
+static void quirk_radeon_32bit_msi(struct pci_dev *dev)
+{
+       struct pci_dn *pdn = pci_get_pdn(dev);
+
+       if (pdn)
+               pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
index e7af165f8b9d402add808fc5ce3760b9674d7c7a..df038442548a1397fb3cf5c9458fc39379a381a2 100644 (file)
 #include <asm/ppc-pci.h>
 #include <asm/firmware.h>
 
+struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
+{
+       struct device_node *dn = pci_device_to_OF_node(pdev);
+       if (!dn)
+               return NULL;
+       return PCI_DN(dn);
+}
+
 /*
  * Traverse_func that inits the PCI fields of the device node.
  * NOTE: this *must* be done before read/write config to the device.
index 78b8766fd79e4605f96a103c6562ff62caa37c5e..c29666586998f27dd0f4d5aa8db721320a82cc2d 100644 (file)
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
 int __ucmpdi2(unsigned long long, unsigned long long);
 EXPORT_SYMBOL(__ucmpdi2);
 #endif
-
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
index ceb4e7b62cf441ed6237499d11ef15cc6e2e4d50..076d1242507a7fdcf32c374b66b3b005eb4e0db9 100644 (file)
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 
 static void prime_debug_regs(struct thread_struct *thread)
 {
+       /*
+        * We could have inherited MSR_DE from userspace, since
+        * it doesn't get cleared on exception entry.  Make sure
+        * MSR_DE is clear before we enable any debug events.
+        */
+       mtmsr(mfmsr() & ~MSR_DE);
+
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
@@ -392,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 {
        mtspr(SPRN_DABR, dabr);
-       mtspr(SPRN_DABRX, dabrx);
+       if (cpu_has_feature(CPU_FTR_DABRX))
+               mtspr(SPRN_DABRX, dabrx);
        return 0;
 }
 #else
@@ -971,6 +979,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
+       ((unsigned long *)sp)[0] = 0;
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
@@ -1360,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 
 #ifdef CONFIG_PPC64
 /* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
 {
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;
@@ -1373,7 +1382,7 @@ void __ppc64_runlatch_on(void)
 }
 
 /* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
 {
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;
index 3b14d320e69f81737b447b59bcef4046edde0b5a..98c2fc198712aabe3f90055e24d653df604be78e 100644 (file)
@@ -32,6 +32,7 @@
 #include <trace/syscall.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
        long ret = 0;
 
+       user_exit();
+
        secure_computing_strict(regs->gpr[0]);
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
+
+       user_enter();
 }
index 1fd6e7b2f390b41dac642f58e781baf735c04be4..52add6f3e201e1c196ea493f40a8fccc2cc922bc 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
        __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
 }
 
+enum rtas_cpu_state {
+       DOWN,
+       UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+                               cpumask_var_t cpus)
+{
+       if (!cpumask_empty(cpus)) {
+               cpumask_clear(cpus);
+               return -EINVAL;
+       } else
+               return 0;
+}
+#else
+/* On return the cpumask will be altered to indicate which CPUs changed
+ * state: CPUs whose state changed are set in the mask, CPUs whose
+ * state is unchanged are cleared in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+                               cpumask_var_t cpus)
+{
+       int cpu;
+       int cpuret = 0;
+       int ret = 0;
+
+       if (cpumask_empty(cpus))
+               return 0;
+
+       for_each_cpu(cpu, cpus) {
+               switch (state) {
+               case DOWN:
+                       cpuret = cpu_down(cpu);
+                       break;
+               case UP:
+                       cpuret = cpu_up(cpu);
+                       break;
+               }
+               if (cpuret) {
+                       pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+                                       __func__,
+                                       ((state == UP) ? "up" : "down"),
+                                       cpu, cpuret);
+                       if (!ret)
+                               ret = cpuret;
+                       if (state == UP) {
+                               /* clear bits for unchanged cpus, return */
+                               cpumask_shift_right(cpus, cpus, cpu);
+                               cpumask_shift_left(cpus, cpus, cpu);
+                               break;
+                       } else {
+                               /* clear bit for unchanged cpu, continue */
+                               cpumask_clear_cpu(cpu, cpus);
+                       }
+               }
+       }
+
+       return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+       int ret;
+
+       ret = rtas_cpu_state_change_mask(UP, cpus);
+
+       if (ret) {
+               cpumask_var_t tmp_mask;
+
+               if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+                       return ret;
+
+               /* Use tmp_mask to preserve cpus mask from first failure */
+               cpumask_copy(tmp_mask, cpus);
+               rtas_offline_cpus_mask(tmp_mask);
+               free_cpumask_var(tmp_mask);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+       return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
 int rtas_ibm_suspend_me(struct rtas_args *args)
 {
        long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        struct rtas_suspend_me_data data;
        DECLARE_COMPLETION_ONSTACK(done);
+       cpumask_var_t offline_mask;
+       int cpuret;
 
        if (!rtas_service_present("ibm,suspend-me"))
                return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
                return 0;
        }
 
+       if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+               return -ENOMEM;
+
        atomic_set(&data.working, 0);
        atomic_set(&data.done, 0);
        atomic_set(&data.error, 0);
        data.token = rtas_token("ibm,suspend-me");
        data.complete = &done;
+
+       /* All present CPUs must be online */
+       cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+       cpuret = rtas_online_cpus_mask(offline_mask);
+       if (cpuret) {
+               pr_err("%s: Could not bring present CPUs online.\n", __func__);
+               atomic_set(&data.error, cpuret);
+               goto out;
+       }
+
        stop_topology_update();
 
        /* Call function on all CPUs.  One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 
        start_topology_update();
 
+       /* Take down CPUs not online prior to suspend */
+       cpuret = rtas_offline_cpus_mask(offline_mask);
+       if (cpuret)
+               pr_warn("%s: Could not restore CPUs to offline state.\n",
+                               __func__);
+
+out:
+       free_cpumask_var(offline_mask);
        return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
index 5b3022470126ce1787bb259c89b319d7f7e9770f..2f3cdb01506de3d7791712ecd6ffeaf1fcd36352 100644 (file)
@@ -89,6 +89,7 @@
 
 /* Array sizes */
 #define VALIDATE_BUF_SIZE 4096    
+#define VALIDATE_MSG_LEN  256
 #define RTAS_MSG_MAXLEN   64
 
 /* Quirk - RTAS requires 4k list length and block size */
@@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
 }
 
 static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, 
-                                  char *msg)
+                                  char *msg, int msglen)
 {
        int n;
 
@@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
                n = sprintf(msg, "%d\n", args_buf->update_results);
                if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
                    (args_buf->update_results == VALIDATE_TMP_UPDATE))
-                       n += sprintf(msg + n, "%s\n", args_buf->buf);
+                       n += snprintf(msg + n, msglen - n, "%s\n",
+                                       args_buf->buf);
        } else {
                n = sprintf(msg, "%d\n", args_buf->status);
        }
@@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
 {
        struct rtas_validate_flash_t *const args_buf =
                &rtas_validate_flash_data;
-       char msg[RTAS_MSG_MAXLEN];
+       char msg[VALIDATE_MSG_LEN];
        int msglen;
 
        mutex_lock(&rtas_validate_flash_mutex);
-       msglen = get_validate_flash_msg(args_buf, msg);
+       msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
        mutex_unlock(&rtas_validate_flash_mutex);
 
        return simple_read_from_buffer(buf, count, ppos, msg, msglen);
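The sprintf()-to-snprintf() change above, together with the larger VALIDATE_MSG_LEN buffer, keeps get_validate_flash_msg() from writing past its message buffer when firmware returns a long validation string. A minimal userspace sketch of the bounded-append pattern, with illustrative sizes:

#include <stdio.h>
#include <string.h>

#define VALIDATE_MSG_LEN 256

int main(void)
{
        char payload[1024];             /* source may be much larger than dest */
        char msg[VALIDATE_MSG_LEN];
        int n;

        memset(payload, 'A', sizeof(payload) - 1);
        payload[sizeof(payload) - 1] = '\0';

        /* Tell snprintf() how much room is left instead of trusting the
         * source length; the copy is truncated rather than overflowing. */
        n = snprintf(msg, sizeof(msg), "%d\n", 0);
        n += snprintf(msg + n, sizeof(msg) - n, "%s\n", payload);

        printf("requested %d bytes, stored at most %zu\n", n, sizeof(msg) - 1);
        return 0;
}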
index cf12eae02de5a694e2ba0e5298bfd8f082b688f9..457e97aa29455e6894257f443f1a223583483685 100644 (file)
 #include <linux/signal.h>
 #include <linux/uprobes.h>
 #include <linux/key.h>
+#include <linux/context_tracking.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/debug.h>
+#include <asm/tm.h>
 
 #include "signal.h"
 
  * through debug.exception-trace sysctl.
  */
 
-int show_unhandled_signals = 0;
+int show_unhandled_signals = 1;
 
 /*
  * Allocate space for the signal frame
  */
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
                           size_t frame_size, int is_32)
 {
         unsigned long oldsp, newsp;
 
         /* Default to using normal stack */
-        oldsp = get_clean_sp(regs, is_32);
+        oldsp = get_clean_sp(sp, is_32);
 
        /* Check for alt stack */
        if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -159,6 +161,8 @@ static int do_signal(struct pt_regs *regs)
 
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 {
+       user_exit();
+
        if (thread_info_flags & _TIF_UPROBE)
                uprobe_notify_resume(regs);
 
@@ -169,4 +173,41 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
        }
+
+       user_enter();
+}
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+       /* When in an active transaction that takes a signal, we need to be
+        * careful with the stack.  It's possible that the stack has moved back
+        * up after the tbegin.  The obvious case here is when the tbegin is
+        * called inside a function that returns before a tend.  In this case,
+        * the stack is part of the checkpointed transactional memory state.
+        * If we write over this non transactionally or in suspend, we are in
+        * trouble because if we get a tm abort, the program counter and stack
+        * pointer will be back at the tbegin but our in memory stack won't be
+        * valid anymore.
+        *
+        * To avoid this, when taking a signal in an active transaction, we
+        * need to use the stack pointer from the checkpointed state, rather
+        * than the speculated state.  This ensures that the signal context
+        * (written tm suspended) will be written below the stack required for
+        * the rollback.  The transaction is aborted because of the treclaim,
+        * so any memory written between the tbegin and the signal will be
+        * rolled back anyway.
+        *
+        * For signals taken in non-TM or suspended mode, we use the
+        * normal/non-checkpointed stack pointer.
+        */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (MSR_TM_ACTIVE(regs->msr)) {
+               tm_enable();
+               tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+               if (MSR_TM_TRANSACTIONAL(regs->msr))
+                       return current->thread.ckpt_regs.gpr[1];
+       }
+#endif
+       return regs->gpr[1];
 }
index ec84c901ceabb754e7e64b6efbf33fda04b7fe22..c69b9aeb9f236646c06ef3f2cfc320ecc00cdcd9 100644 (file)
@@ -12,7 +12,7 @@
 
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
                                  size_t frame_size, int is_32);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
index 95068bf569adc17cf51d7e32bd650daaba234b17..201385c3a1ae186f9de8102d0f5a8d645c36a6d4 100644 (file)
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
 {
        unsigned long msr = regs->msr;
 
-       /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
-        * thread.transact_fpr[], thread.transact_vr[], etc.
-        */
-       tm_enable();
-       tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
        /* Make sure floating point registers are stored in regs */
        flush_fp_to_thread(current);
 
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 
        /* Set up Signal Frame */
        /* Put a Real Time Context onto stack */
-       rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+       rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
        addr = rt_sf;
        if (unlikely(rt_sf == NULL))
                goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        unsigned long tramp;
 
        /* Set up Signal Frame */
-       frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+       frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
        if (unlikely(frame == NULL))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;
index c1794286098ca2f78c5e32f4c01b0891ca04fc78..345947367ec00a4fa440e162005864d9708eae6a 100644 (file)
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
  * As above, but Transactional Memory is in use, so deliver sigcontexts
  * containing checkpointed and transactional register states.
  *
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state.  If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
  */
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
                                 struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 
        BUG_ON(!MSR_TM_ACTIVE(regs->msr));
 
-       /* tm_reclaim rolls back all reg states, saving checkpointed (older)
-        * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
-        * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
-        * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
-        * thread.fr[]/vr[]s.  The transactional (newer) GPRs are on the
-        * stack, in *regs.
-        */
-       tm_enable();
-       tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
        flush_fp_to_thread(current);
 
 #ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
        unsigned long newsp = 0;
        long err = 0;
 
-       frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+       frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
        if (unlikely(frame == NULL))
                goto badframe;
 
index 83efa2f7d9266ced97726aa6b26c1739076a6ec1..c0e5caf8ccc72c0f7624b1e7dc9c3f7ecfe4b242 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kdebug.h>
 #include <linux/debugfs.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -52,6 +53,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #include <asm/processor.h>
+#include <asm/tm.h>
 #endif
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
@@ -667,6 +669,7 @@ int machine_check_generic(struct pt_regs *regs)
 
 void machine_check_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
        int recover = 0;
 
        __get_cpu_var(irq_stat).mce_exceptions++;
@@ -683,7 +686,7 @@ void machine_check_exception(struct pt_regs *regs)
                recover = cur_cpu_spec->machine_check(regs);
 
        if (recover > 0)
-               return;
+               goto bail;
 
 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
@@ -693,20 +696,23 @@ void machine_check_exception(struct pt_regs *regs)
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
-       return;
+       goto bail;
 #endif
 
        if (debugger_fault_handler(regs))
-               return;
+               goto bail;
 
        if (check_io_access(regs))
-               return;
+               goto bail;
 
        die("Machine check", regs, SIGBUS);
 
        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");
+
+bail:
+       exception_exit(prev_state);
 }
 
 void SMIException(struct pt_regs *regs)
@@ -716,20 +722,29 @@ void SMIException(struct pt_regs *regs)
 
 void unknown_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);
 
        _exception(SIGTRAP, regs, 0, 0);
+
+       exception_exit(prev_state);
 }
 
 void instruction_breakpoint_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
-               return;
+               goto bail;
        if (debugger_iabr_match(regs))
-               return;
+               goto bail;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void RunModeException(struct pt_regs *regs)
@@ -739,15 +754,20 @@ void RunModeException(struct pt_regs *regs)
 
 void __kprobes single_step_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        clear_single_step(regs);
 
        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
-               return;
+               goto bail;
        if (debugger_sstep(regs))
-               return;
+               goto bail;
 
        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+
+bail:
+       exception_exit(prev_state);
 }
 
 /*
@@ -913,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
        return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+        /* If we're emulating a load/store in an active transaction, we cannot
+         * emulate it as the kernel operates in transaction suspended context.
+         * We need to abort the transaction.  This creates a persistent TM
+         * abort so tell the user what caused it with a new code.
+        */
+       if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+               tm_enable();
+               tm_abort(cause);
+               return true;
+       }
+       return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+       return false;
+}
+#endif
+
 static int emulate_instruction(struct pt_regs *regs)
 {
        u32 instword;
@@ -952,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)
 
        /* Emulate load/store string insn. */
        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+               if (tm_abort_check(regs,
+                                  TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+                       return -EINVAL;
                PPC_WARN_EMULATED(string, regs);
                return emulate_string_inst(regs, instword);
        }
@@ -1005,6 +1050,7 @@ int is_valid_bugaddr(unsigned long addr)
 
 void __kprobes program_check_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);
        extern int do_mathemu(struct pt_regs *regs);
 
@@ -1014,26 +1060,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        if (reason & REASON_FP) {
                /* IEEE FP exception */
                parse_fpe(regs);
-               return;
+               goto bail;
        }
        if (reason & REASON_TRAP) {
                /* Debugger is first in line to stop recursive faults in
                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
                if (debugger_bpt(regs))
-                       return;
+                       goto bail;
 
                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                                == NOTIFY_STOP)
-                       return;
+                       goto bail;
 
                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
-                       return;
+                       goto bail;
                }
                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-               return;
+               goto bail;
        }
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (reason & REASON_TM) {
@@ -1049,7 +1095,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                if (!user_mode(regs) &&
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
-                       return;
+                       goto bail;
                }
                /* If usermode caused this, it's done something illegal and
                 * gets a SIGILL slap on the wrist.  We call it an illegal
@@ -1059,7 +1105,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                 */
                if (user_mode(regs)) {
                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-                       return;
+                       goto bail;
                } else {
                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
                               "at %lx (msr 0x%x)\n", regs->nip, reason);
@@ -1083,16 +1129,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        switch (do_mathemu(regs)) {
        case 0:
                emulate_single_step(regs);
-               return;
+               goto bail;
        case 1: {
                        int code = 0;
                        code = __parse_fpscr(current->thread.fpscr.val);
                        _exception(SIGFPE, regs, code, regs->nip);
-                       return;
+                       goto bail;
                }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-               return;
+               goto bail;
        }
        /* fall through on any other errors */
 #endif /* CONFIG_MATH_EMULATION */
@@ -1103,10 +1149,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                case 0:
                        regs->nip += 4;
                        emulate_single_step(regs);
-                       return;
+                       goto bail;
                case -EFAULT:
                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-                       return;
+                       goto bail;
                }
        }
 
@@ -1114,16 +1160,33 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        else
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+
+bail:
+       exception_exit(prev_state);
+}
+
+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+       regs->msr |= REASON_ILLEGAL;
+       program_check_exception(regs);
 }
 
 void alignment_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
        int sig, code, fixed = 0;
 
        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
 
+       if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+               goto bail;
+
        /* we don't implement logging of alignment exceptions */
        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                fixed = fix_alignment(regs);
@@ -1131,7 +1194,7 @@ void alignment_exception(struct pt_regs *regs)
        if (fixed == 1) {
                regs->nip += 4; /* skip over emulated instruction */
                emulate_single_step(regs);
-               return;
+               goto bail;
        }
 
        /* Operand address was bad */
@@ -1146,6 +1209,9 @@ void alignment_exception(struct pt_regs *regs)
                _exception(sig, regs, code, regs->dar);
        else
                bad_page_fault(regs, regs->dar, sig);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void StackOverflow(struct pt_regs *regs)
@@ -1174,23 +1240,32 @@ void trace_syscall(struct pt_regs *regs)
 
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+
+       exception_exit(prev_state);
 }
 
 void altivec_unavailable_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-               return;
+               goto bail;
        }
 
        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
                        "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void vsx_unavailable_exception(struct pt_regs *regs)
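
Most of the exception-handler hunks above follow one shape: save the context-tracking state with exception_enter() on entry, turn every early return into goto bail, and restore the state with exception_exit() at a single exit point. A self-contained userspace sketch of that shape, with stubs standing in for the kernel's context-tracking hooks (illustration only):

#include <stdio.h>

enum ctx_state { CTX_KERNEL, CTX_USER };

/* Stubs standing in for the kernel's context-tracking hooks. */
static enum ctx_state exception_enter(void)
{
        puts("enter: user -> kernel context");
        return CTX_USER;
}

static void exception_exit(enum ctx_state prev_state)
{
        printf("exit: back to %s context\n",
               prev_state == CTX_USER ? "user" : "kernel");
}

/* Handler shape after the patch: early returns become "goto bail" so
 * exception_exit() runs on every path out of the handler. */
static int handle(int recoverable)
{
        enum ctx_state prev_state = exception_enter();
        int rc = 0;

        if (recoverable)
                goto bail;              /* was a bare "return" before */

        rc = -1;                        /* unrecoverable: fall through */
bail:
        exception_exit(prev_state);
        return rc;
}

int main(void)
{
        handle(1);
        handle(0);
        return 0;
}

The single label is what guarantees a handler with several early outs cannot skip the exit hook.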
index 13b867093499be5abccb014677371f8787390c7f..9d3fdcd66290b79c6200cd853a04eea73be1a3d0 100644 (file)
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
        udbg_init_usbgecko();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
        udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
+       /* In memory console */
+       udbg_init_memcons();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
        udbg_init_ehv_bc();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
index 5dd3ab46997603e6f55c681781b773e50a0ff557..ed03854481483afe5159a095fd8d8a2d6f042f2b 100644 (file)
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *tlbe;
        unsigned int gtlb_index;
+       int idx;
 
        gtlb_index = kvmppc_get_gpr(vcpu, ra);
        if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
                return EMULATE_FAIL;
        }
 
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        if (tlbe_is_host_safe(vcpu, tlbe)) {
                gva_t eaddr;
                gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
                kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
        }
 
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
        trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
                             tlbe->word2);
 
index 9de24f8e03c71b44e0407b65bbe137c3506650e3..550f5928b394f6cb4c1da71978031a1ce1f96b79 100644 (file)
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_CPPR:
        case H_EOI:
        case H_IPI:
+       case H_IPOLL:
+       case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu)) {
                        ret = kvmppc_xics_hcall(vcpu, req);
                        break;
index b24309c6c2d507d3f2c008358c3c0728d450a7cd..da0e0bc268bd4bce1322b696dd25972c24bc740e 100644 (file)
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_CPPR:
        case H_EOI:
        case H_IPI:
+       case H_IPOLL:
+       case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu))
                        return kvmppc_h_pr_xics_hcall(vcpu, cmd);
                break;
index f7a103756618e530014d0dc03508a91ff00ded2b..94c1dd46b83d54e98a96c6af3b2e682072f027e6 100644 (file)
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
        return H_SUCCESS;
 }
 
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+       union kvmppc_icp_state state;
+       struct kvmppc_icp *icp;
+
+       icp = vcpu->arch.icp;
+       if (icp->server_num != server) {
+               icp = kvmppc_xics_find_server(vcpu->kvm, server);
+               if (!icp)
+                       return H_PARAMETER;
+       }
+       state = ACCESS_ONCE(icp->state);
+       kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+       kvmppc_set_gpr(vcpu, 5, state.mfrr);
+       return H_SUCCESS;
+}
+
 static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
        union kvmppc_icp_state old_state, new_state;
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
        if (!xics || !vcpu->arch.icp)
                return H_HARDWARE;
 
+       /* These requests don't have real-mode implementations at present */
+       switch (req) {
+       case H_XIRR_X:
+               res = kvmppc_h_xirr(vcpu);
+               kvmppc_set_gpr(vcpu, 4, res);
+               kvmppc_set_gpr(vcpu, 5, get_tb());
+               return rc;
+       case H_IPOLL:
+               rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+               return rc;
+       }
+
        /* Check for real mode returning too hard */
        if (xics->real_mode)
                return kvmppc_xics_rm_complete(vcpu, req);
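
The new kvmppc_h_ipoll() helper above reports the polled ICP state by loading GPR4 with the CPPR in the top byte and the XISR below it, and GPR5 with the MFRR. A small sketch of the GPR4 packing; the explicit 24-bit XISR mask is an assumption made for this sketch, inferred from the shift in the hunk:

#include <stdint.h>
#include <stdio.h>

/* Pack an ICP snapshot the way the hunk loads GPR4: CPPR in bits 31..24,
 * XISR in the low 24 bits (mask added here for illustration). */
static uint32_t pack_icp_state(uint8_t cppr, uint32_t xisr)
{
        return ((uint32_t)cppr << 24) | (xisr & 0x00ffffff);
}

int main(void)
{
        printf("gpr4 = 0x%08x\n", (unsigned)pack_icp_state(0xff, 0x12345));
        return 0;
}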
index 1020119226dbefe5589726758bbff2f03c70ec53..5cd7ad0c11764ec59432d945b5d824ea05f6603d 100644 (file)
@@ -832,6 +832,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
        int r = RESUME_HOST;
        int s;
+       int idx;
+
+#ifdef CONFIG_PPC64
+       WARN_ON(local_paca->irq_happened != 0);
+#endif
+
+       /*
+        * We enter with interrupts disabled in hardware, but
+        * we need to call hard_irq_disable anyway to ensure that
+        * the software state is kept in sync.
+        */
+       hard_irq_disable();
 
        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1065,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        break;
                }
 
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+
                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1075,6 +1089,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }
 
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
 
@@ -1098,6 +1113,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+
                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1114,6 +1131,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }
 
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
 
index c41a5a96b558bd8d17ccaf3adc8b49e95c59e817..6d6f153b6c1d85f996d65056c328e172fb3bbc37 100644 (file)
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
        struct kvm_book3e_206_tlb_entry *gtlbe;
        int tlbsel, esel;
        int recal = 0;
+       int idx;
 
        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
        }
 
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
                kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
        }
 
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
 }
index 753cc99eff2be8dab0f55c7d34409dc33d30a7fa..19c8379575f70284a4e12cc56b1b8462ca3cd5e5 100644 (file)
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
                r = 0;
        else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
                r = 0;
-       else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
-               r = 0;
        else
                r = -ENOTSUPP;
 
index 0ef75bf0695cee25185842625526e1d46a4e7026..395c594722a223e339944905ddeb675670c9085e 100644 (file)
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
         * aligned we don't need to clear the bottom 7 bits of either
         * address.
         */
-       ori     r9,r3,1         /* stream=1 */
+       ori     r9,r3,1         /* stream=1 => to */
 
 #ifdef CONFIG_PPC_64K_PAGES
-       lis     r7,0x0E01       /* depth=7, units=512 */
+       lis     r7,0x0E01       /* depth=7
+                                * units/cachelines=512 */
 #else
        lis     r7,0x0E00       /* depth=7 */
-       ori     r7,r7,0x1000    /* units=32 */
+       ori     r7,r7,0x1000    /* units/cachelines=32 */
 #endif
        ori     r10,r7,1        /* stream=1 */
 
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
 
 .machine push
 .machine "power4"
-       dcbt    r0,r4,0b01000
-       dcbt    r0,r7,0b01010
-       dcbtst  r0,r9,0b01000
-       dcbtst  r0,r10,0b01010
+       /* setup read stream 0  */
+       dcbt    r0,r4,0b01000   /* addr from */
+       dcbt    r0,r7,0b01010   /* length and depth from */
+       /* setup write stream 1 */
+       dcbtst  r0,r9,0b01000   /* addr to */
+       dcbtst  r0,r10,0b01010  /* length and depth to */
        eieio
-       dcbt    r0,r8,0b01010   /* GO */
+       dcbt    r0,r8,0b01010   /* all streams GO */
 .machine pop
 
 #ifdef CONFIG_ALTIVEC
index 0d24ff15f5f6fb197af85a4d9e537eb8880f099c..d1f11795a7ad64bd6bd05beb522e07e44eb46498 100644 (file)
@@ -318,12 +318,14 @@ err1;     stb     r0,0(r3)
 
 .machine push
 .machine "power4"
-       dcbt    r0,r6,0b01000
-       dcbt    r0,r7,0b01010
-       dcbtst  r0,r9,0b01000
-       dcbtst  r0,r10,0b01010
+       /* setup read stream 0 */
+       dcbt    r0,r6,0b01000   /* addr from */
+       dcbt    r0,r7,0b01010   /* length and depth from */
+       /* setup write stream 1 */
+       dcbtst  r0,r9,0b01000   /* addr to */
+       dcbtst  r0,r10,0b01010  /* length and depth to */
        eieio
-       dcbt    r0,r8,0b01010   /* GO */
+       dcbt    r0,r8,0b01010   /* all streams GO */
 .machine pop
 
        beq     cr1,.Lunwind_stack_nonvmx_copy
index 229951ffc35137beb826657c826813b9733eba9b..8726779e1409b5da36c1e1ea852276cf47d4252c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/magic.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
 int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                            unsigned long error_code)
 {
+       enum ctx_state prev_state = exception_enter();
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        int trap = TRAP(regs);
        int is_exec = trap == 0x400;
        int fault;
+       int rc = 0;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
         * look at it
         */
        if (error_code & ICSWX_DSI_UCT) {
-               int rc = acop_handle_fault(regs, address, error_code);
+               rc = acop_handle_fault(regs, address, error_code);
                if (rc)
-                       return rc;
+                       goto bail;
        }
 #endif /* CONFIG_PPC_ICSWX */
 
        if (notify_page_fault(regs))
-               return 0;
+               goto bail;
 
        if (unlikely(debugger_fault_handler(regs)))
-               return 0;
+               goto bail;
 
        /* On a kernel SLB miss we can only check for a valid exception entry */
-       if (!user_mode(regs) && (address >= TASK_SIZE))
-               return SIGSEGV;
+       if (!user_mode(regs) && (address >= TASK_SIZE)) {
+               rc = SIGSEGV;
+               goto bail;
+       }
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
                             defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* breakpoint match */
                do_break(regs, address, error_code);
-               return 0;
+               goto bail;
        }
 #endif
 
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                local_irq_enable();
 
        if (in_atomic() || mm == NULL) {
-               if (!user_mode(regs))
-                       return SIGSEGV;
+               if (!user_mode(regs)) {
+                       rc = SIGSEGV;
+                       goto bail;
+               }
                /* in_atomic() in user mode is really bad,
                   as is current->mm == NULL. */
                printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-               int rc = mm_fault_error(regs, address, fault);
+               rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
-                       return rc;
+                       goto bail;
+               else
+                       rc = 0;
        }
 
        /*
@@ -454,7 +463,7 @@ good_area:
        }
 
        up_read(&mm->mmap_sem);
-       return 0;
+       goto bail;
 
 bad_area:
        up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
-               return 0;
+               goto bail;
        }
 
        if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
                                   address, from_kuid(&init_user_ns, current_uid()));
 
-       return SIGSEGV;
+       rc = SIGSEGV;
+
+bail:
+       exception_exit(prev_state);
+       return rc;
 
 }
 
index 6a2aead5b0e5e1e78c5c742c12a97a83755859bc..4c122c3f1623c7525e682338f555f73678263f72 100644 (file)
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
        hpte_v = hptep->v;
        actual_psize = hpte_actual_psize(hptep, psize);
+       /*
+        * We need to invalidate the TLB always because hpte_remove doesn't do
+        * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+        * random entry from it. When we do that we don't invalidate the TLB
+        * (hpte_remove) because we assume the old translation is still
+        * technically "valid".
+        */
        if (actual_psize < 0) {
-               native_unlock_hpte(hptep);
-               return -1;
+               actual_psize = psize;
+               ret = -1;
+               goto err_out;
        }
-       /* Even if we miss, we need to invalidate the TLB */
        if (!HPTE_V_COMPARE(hpte_v, want_v)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
                        (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
        }
+err_out:
        native_unlock_hpte(hptep);
 
        /* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
        hptep = htab_address + slot;
        actual_psize = hpte_actual_psize(hptep, psize);
        if (actual_psize < 0)
-               return;
+               actual_psize = psize;
 
        /* Update the HPTE */
        hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
        hpte_v = hptep->v;
 
        actual_psize = hpte_actual_psize(hptep, psize);
+       /*
+        * We need to invalidate the TLB always because hpte_remove doesn't do
+        * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+        * random entry from it. When we do that we don't invalidate the TLB
+        * (hpte_remove) because we assume the old translation is still
+        * technically "valid".
+        */
        if (actual_psize < 0) {
+               actual_psize = psize;
                native_unlock_hpte(hptep);
-               local_irq_restore(flags);
-               return;
+               goto err_out;
        }
-       /* Even if we miss, we need to invalidate the TLB */
        if (!HPTE_V_COMPARE(hpte_v, want_v))
                native_unlock_hpte(hptep);
        else
                /* Invalidate the hpte. NOTE: this also unlocks it */
                hptep->v = 0;
 
+err_out:
        /* Invalidate the TLB */
        tlbie(vpn, psize, actual_psize, ssize, local);
-
        local_irq_restore(flags);
 }
 
index 88ac0eeaadde69bfeceb82d44527754638ed351b..e303a6d74e3a72ca2f1db230073f7ec9896a9895 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/memblock.h>
+#include <linux/context_tracking.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
+       enum ctx_state prev_state = exception_enter();
        pgd_t *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                mm = current->mm;
                if (! mm) {
                        DBG_LOW(" user region with no mm !\n");
-                       return 1;
+                       rc = 1;
+                       goto bail;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                /* Not a valid range
                 * Send the problem up to do_page_fault 
                 */
-               return 1;
+               rc = 1;
+               goto bail;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
        /* Bad address. */
        if (!vsid) {
                DBG_LOW("Bad address!\n");
-               return 1;
+               rc = 1;
+               goto bail;
        }
        /* Get pgdir */
        pgdir = mm->pgd;
-       if (pgdir == NULL)
-               return 1;
+       if (pgdir == NULL) {
+               rc = 1;
+               goto bail;
+       }
 
        /* Check CPU locality */
        tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
-               return 1;
+               rc = 1;
+               goto bail;
        }
 
        /* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
-               return 1;
+               rc = 1;
+               goto bail;
        }
 
 #ifdef CONFIG_HUGETLB_PAGE
-       if (hugeshift)
-               return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+       if (hugeshift) {
+               rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
                                        ssize, hugeshift, psize);
+               goto bail;
+       }
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
        DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+       exception_exit(prev_state);
        return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
  */
 void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 {
+       enum ctx_state prev_state = exception_enter();
+
        if (user_mode(regs)) {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);
+
+       exception_exit(prev_state);
 }
 
 long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
index c2787bf779ca0f8730f23b542f54c2657fe77471..a90b9c4589908078f139d953707b6ce13b6860d0 100644 (file)
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long phys)
 {
        int  mapped = htab_bolt_mapping(start, start + page_size, phys,
-                                       PAGE_KERNEL, mmu_vmemmap_psize,
+                                       pgprot_val(PAGE_KERNEL),
+                                       mmu_vmemmap_psize,
                                        mmu_kernel_ssize);
        BUG_ON(mapped < 0);
 }
index c427ae36374ab4fa6eb1503c15b07de2926006e6..bf56e33f8257f68717b241c98abefb2f925f2858 100644 (file)
@@ -650,8 +650,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 
        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;
-       image = module_alloc(max_t(unsigned int, alloclen,
-                                  sizeof(struct work_struct)));
+       image = module_alloc(alloclen);
        if (!image)
                goto out;
 
@@ -688,20 +687,8 @@ out:
        return;
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-       module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter) {
-               struct work_struct *work = (struct work_struct *)fp->bpf_func;
-
-               INIT_WORK(work, jit_free_defer);
-               schedule_work(work);
-       }
+       if (fp->bpf_func != sk_run_filter)
+               module_free(NULL, fp->bpf_func);
 }
index c627843c5b2eff6084cfd3d23972e2cbd5735d7d..29c6482890c88c89f8fbb159d5561b591b47c09f 100644 (file)
 #include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <asm/reg.h>
 #include <asm/pmc.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
+#include <asm/code-patching.h>
 
 #define BHRB_MAX_ENTRIES       32
 #define BHRB_TARGET            0x0000000000000002
@@ -100,11 +102,15 @@ static inline int siar_valid(struct pt_regs *regs)
        return 1;
 }
 
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 #endif /* CONFIG_PPC32 */
 
 static bool regs_use_siar(struct pt_regs *regs)
 {
-       return !!(regs->result & 1);
+       return !!regs->result;
 }
 
 /*
@@ -130,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * If we're not doing instruction sampling, give them the SDAR
  * (sampled data address).  If we are doing instruction sampling, then
  * only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 {
        unsigned long mmcra = regs->dsisr;
-       unsigned long sdsync;
+       bool sdar_valid;
 
-       if (ppmu->flags & PPMU_SIAR_VALID)
-               sdsync = POWER7P_MMCRA_SDAR_VALID;
-       else if (ppmu->flags & PPMU_ALT_SIPR)
-               sdsync = POWER6_MMCRA_SDSYNC;
-       else
-               sdsync = MMCRA_SDSYNC;
+       if (ppmu->flags & PPMU_HAS_SIER)
+               sdar_valid = regs->dar & SIER_SDAR_VALID;
+       else {
+               unsigned long sdsync;
+
+               if (ppmu->flags & PPMU_SIAR_VALID)
+                       sdsync = POWER7P_MMCRA_SDAR_VALID;
+               else if (ppmu->flags & PPMU_ALT_SIPR)
+                       sdsync = POWER6_MMCRA_SDSYNC;
+               else
+                       sdsync = MMCRA_SDSYNC;
 
-       if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+               sdar_valid = mmcra & sdsync;
+       }
+
+       if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
                *addrp = mfspr(SPRN_SDAR);
 }
 
@@ -175,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs)
        return !!(regs->dsisr & sipr);
 }
 
-static bool regs_no_sipr(struct pt_regs *regs)
-{
-       return !!(regs->result & 2);
-}
-
 static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 {
        if (regs->msr & MSR_PR)
@@ -202,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
         * SIAR which should give slightly more reliable
         * results
         */
-       if (regs_no_sipr(regs)) {
+       if (ppmu->flags & PPMU_NO_SIPR) {
                unsigned long siar = mfspr(SPRN_SIAR);
                if (siar >= PAGE_OFFSET)
                        return PERF_RECORD_MISC_KERNEL;
@@ -233,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs)
        int use_siar;
 
        regs->dsisr = mmcra;
-       regs->result = 0;
-
-       if (ppmu->flags & PPMU_NO_SIPR)
-               regs->result |= 2;
-
-       /*
-        * On power8 if we're in random sampling mode, the SIER is updated.
-        * If we're in continuous sampling mode, we don't have SIPR.
-        */
-       if (ppmu->flags & PPMU_HAS_SIER) {
-               if (marked)
-                       regs->dar = mfspr(SPRN_SIER);
-               else
-                       regs->result |= 2;
-       }
 
+       if (ppmu->flags & PPMU_HAS_SIER)
+               regs->dar = mfspr(SPRN_SIER);
 
        /*
         * If this isn't a PMU exception (eg a software event) the SIAR is
@@ -273,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
                use_siar = 1;
        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
                use_siar = 0;
-       else if (!regs_no_sipr(regs) && regs_sipr(regs))
+       else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
                use_siar = 0;
        else
                use_siar = 1;
 
-       regs->result |= use_siar;
+       regs->result = use_siar;
 }
 
 /*
@@ -302,12 +298,170 @@ static inline int siar_valid(struct pt_regs *regs)
        unsigned long mmcra = regs->dsisr;
        int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 
-       if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
-               return mmcra & POWER7P_MMCRA_SIAR_VALID;
+       if (marked) {
+               if (ppmu->flags & PPMU_HAS_SIER)
+                       return regs->dar & SIER_SIAR_VALID;
+
+               if (ppmu->flags & PPMU_SIAR_VALID)
+                       return mmcra & POWER7P_MMCRA_SIAR_VALID;
+       }
 
        return 1;
 }
 
+
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+       asm volatile(PPC_CLRBHRB);
+}
+
+static void power_pmu_bhrb_enable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+       if (!ppmu->bhrb_nr)
+               return;
+
+       /* Clear BHRB if we changed task context to avoid data leaks */
+       if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+               power_pmu_bhrb_reset();
+               cpuhw->bhrb_context = event->ctx;
+       }
+       cpuhw->bhrb_users++;
+}
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+       if (!ppmu->bhrb_nr)
+               return;
+
+       cpuhw->bhrb_users--;
+       WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+       if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+               /* BHRB cannot be turned off when other
+                * events are active on the PMU.
+                */
+
+               /* avoid stale pointer */
+               cpuhw->bhrb_context = NULL;
+       }
+}
+
+/* Called from ctxsw to prevent one process's branch entries from
+ * mingling with the other process's entries during context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+       if (ppmu->bhrb_nr)
+               power_pmu_bhrb_reset();
+}
+/* Calculate the to address for a branch */
+static __u64 power_pmu_bhrb_to(u64 addr)
+{
+       unsigned int instr;
+       int ret;
+       __u64 target;
+
+       if (is_kernel_addr(addr))
+               return branch_target((unsigned int *)addr);
+
+       /* Userspace: need copy instruction here then translate it */
+       pagefault_disable();
+       ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
+       if (ret) {
+               pagefault_enable();
+               return 0;
+       }
+       pagefault_enable();
+
+       target = branch_target(&instr);
+       if ((!target) || (instr & BRANCH_ABSOLUTE))
+               return target;
+
+       /* Translate relative branch target from kernel to user address */
+       return target - (unsigned long)&instr + addr;
+}
+
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+       u64 val;
+       u64 addr;
+       int r_index, u_index, pred;
+
+       r_index = 0;
+       u_index = 0;
+       while (r_index < ppmu->bhrb_nr) {
+               /* Assembly read function */
+               val = read_bhrb(r_index++);
+               if (!val)
+                       /* Terminal marker: End of valid BHRB entries */
+                       break;
+               else {
+                       addr = val & BHRB_EA;
+                       pred = val & BHRB_PREDICTION;
+
+                       if (!addr)
+                               /* invalid entry */
+                               continue;
+
+                       /* Branches are read most recent first (ie. mfbhrb 0 is
+                        * the most recent branch).
+                        * There are two types of valid entries:
+                        * 1) a target entry which is the to address of a
+                        *    computed goto like a blr,bctr,btar.  The next
+                        *    entry read from the bhrb will be branch
+                        *    corresponding to this target (ie. the actual
+                        *    blr/bctr/btar instruction).
+                        * 2) a from address which is an actual branch.  If a
+                        *    target entry precedes this, then this is the
+                        *    matching branch for that target.  If this is not
+                        *    following a target entry, then this is a branch
+                        *    where the target is given as an immediate field
+                        *    in the instruction (ie. an i or b form branch).
+                        *    In this case we need to read the instruction from
+                        *    memory to determine the target/to address.
+                        */
+
+                       if (val & BHRB_TARGET) {
+                               /* Target branches use two entries
+                                * (ie. computed gotos/XL form)
+                                */
+                               cpuhw->bhrb_entries[u_index].to = addr;
+                               cpuhw->bhrb_entries[u_index].mispred = pred;
+                               cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+                               /* Get from address in next entry */
+                               val = read_bhrb(r_index++);
+                               addr = val & BHRB_EA;
+                               if (val & BHRB_TARGET) {
+                                       /* Shouldn't have two targets in a
+                                          row. Reset index and try again */
+                                       r_index--;
+                                       addr = 0;
+                               }
+                               cpuhw->bhrb_entries[u_index].from = addr;
+                       } else {
+                               /* Branches to immediate field 
+                                  (ie I or B form) */
+                               cpuhw->bhrb_entries[u_index].from = addr;
+                               cpuhw->bhrb_entries[u_index].to =
+                                       power_pmu_bhrb_to(addr);
+                               cpuhw->bhrb_entries[u_index].mispred = pred;
+                               cpuhw->bhrb_entries[u_index].predicted = ~pred;
+                       }
+                       u_index++;
+
+               }
+       }
+       cpuhw->bhrb_stack.nr = u_index;
+       return;
+}
+
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
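
The relocated power_pmu_bhrb_read() above splits each raw BHRB value into an effective address, a prediction bit and a target bit before filling cpuhw->bhrb_entries[]. A standalone sketch of that field split; BHRB_TARGET matches the definition visible earlier in this file's diff, while the other two masks are placeholder values for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BHRB_TARGET      0x0000000000000002ULL  /* as defined earlier in the diff */
#define BHRB_PREDICTION  0x0000000000000001ULL  /* placeholder for this sketch */
#define BHRB_EA          0xfffffffffffffffcULL  /* placeholder for this sketch */

struct bhrb_fields {
        uint64_t addr;
        bool mispred;
        bool is_target;
};

static struct bhrb_fields decode_bhrb(uint64_t val)
{
        struct bhrb_fields f = {
                .addr      = val & BHRB_EA,
                .mispred   = (val & BHRB_PREDICTION) != 0,
                .is_target = (val & BHRB_TARGET) != 0,
        };
        return f;
}

int main(void)
{
        struct bhrb_fields f = decode_bhrb(0xc000000000abcd06ULL);

        printf("addr=0x%llx target=%d mispred=%d\n",
               (unsigned long long)f.addr, f.is_target, f.mispred);
        return 0;
}

A target entry carries the "to" address of a computed branch; the following entry then supplies the matching "from" address, which is why the reader consumes two raw values for that case.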
@@ -904,47 +1058,6 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-/* Reset all possible BHRB entries */
-static void power_pmu_bhrb_reset(void)
-{
-       asm volatile(PPC_CLRBHRB);
-}
-
-void power_pmu_bhrb_enable(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-       if (!ppmu->bhrb_nr)
-               return;
-
-       /* Clear BHRB if we changed task context to avoid data leaks */
-       if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
-               power_pmu_bhrb_reset();
-               cpuhw->bhrb_context = event->ctx;
-       }
-       cpuhw->bhrb_users++;
-}
-
-void power_pmu_bhrb_disable(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-       if (!ppmu->bhrb_nr)
-               return;
-
-       cpuhw->bhrb_users--;
-       WARN_ON_ONCE(cpuhw->bhrb_users < 0);
-
-       if (!cpuhw->disabled && !cpuhw->bhrb_users) {
-               /* BHRB cannot be turned off when other
-                * events are active on the PMU.
-                */
-
-               /* avoid stale pointer */
-               cpuhw->bhrb_context = NULL;
-       }
-}
-
 /*
  * Add a event to the PMU.
  * If all events are not already frozen, then we disable and
@@ -1180,15 +1293,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
        return 0;
 }
 
-/* Called from ctxsw to prevent one process's branch entries to
- * mingle with the other process's entries during context switch.
- */
-void power_pmu_flush_branch_stack(void)
-{
-       if (ppmu->bhrb_nr)
-               power_pmu_bhrb_reset();
-}
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1458,77 +1562,6 @@ struct pmu power_pmu = {
        .flush_branch_stack = power_pmu_flush_branch_stack,
 };
 
-/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
-{
-       u64 val;
-       u64 addr;
-       int r_index, u_index, target, pred;
-
-       r_index = 0;
-       u_index = 0;
-       while (r_index < ppmu->bhrb_nr) {
-               /* Assembly read function */
-               val = read_bhrb(r_index);
-
-               /* Terminal marker: End of valid BHRB entries */
-               if (val == 0) {
-                       break;
-               } else {
-                       /* BHRB field break up */
-                       addr = val & BHRB_EA;
-                       pred = val & BHRB_PREDICTION;
-                       target = val & BHRB_TARGET;
-
-                       /* Probable Missed entry: Not applicable for POWER8 */
-                       if ((addr == 0) && (target == 0) && (pred == 1)) {
-                               r_index++;
-                               continue;
-                       }
-
-                       /* Real Missed entry: Power8 based missed entry */
-                       if ((addr == 0) && (target == 1) && (pred == 1)) {
-                               r_index++;
-                               continue;
-                       }
-
-                       /* Reserved condition: Not a valid entry  */
-                       if ((addr == 0) && (target == 1) && (pred == 0)) {
-                               r_index++;
-                               continue;
-                       }
-
-                       /* Is a target address */
-                       if (val & BHRB_TARGET) {
-                               /* First address cannot be a target address */
-                               if (r_index == 0) {
-                                       r_index++;
-                                       continue;
-                               }
-
-                               /* Update target address for the previous entry */
-                               cpuhw->bhrb_entries[u_index - 1].to = addr;
-                               cpuhw->bhrb_entries[u_index - 1].mispred = pred;
-                               cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
-
-                               /* Dont increment u_index */
-                               r_index++;
-                       } else {
-                               /* Update address, flags for current entry */
-                               cpuhw->bhrb_entries[u_index].from = addr;
-                               cpuhw->bhrb_entries[u_index].mispred = pred;
-                               cpuhw->bhrb_entries[u_index].predicted = ~pred;
-
-                               /* Successfully popullated one entry */
-                               u_index++;
-                               r_index++;
-                       }
-               }
-       }
-       cpuhw->bhrb_stack.nr = u_index;
-       return;
-}
-
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1725,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                        }
                }
        }
-       if ((!found) && printk_ratelimit())
+       if (!found && !nmi && printk_ratelimit())
                printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
        /*
index a881232a3cce1bdafee6f5f2a6536a89afe188ab..b62aab3e22ecd0447caf0db7ff97c2c16bc12644 100644 (file)
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON
 
 config RTAS_PROC
        bool "Proc interface to RTAS"
-       depends on PPC_RTAS
+       depends on PPC_RTAS && PROC_FS
        default y
 
 config RTAS_FLASH
index d3e840d643af5135dc0b4e0acb6cec114967264a..c24684c818ab015cc9140d9870f26dc97bfb162a 100644 (file)
@@ -6,6 +6,7 @@ config PPC_POWERNV
        select PPC_ICP_NATIVE
        select PPC_P7_NAP
        select PPC_PCI_CHOICE if EMBEDDED
+       select EPAPR_BOOT
        default y
 
 config POWERNV_MSI
index ade4463226c612fd338b5cbaed1cb941d14881a9..628c564ceadbb32b1c96287f3ba5cb43390cc56a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <asm/opal.h>
 #include <asm/firmware.h>
 
@@ -28,6 +29,8 @@ struct opal {
 static struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
 extern u64 opal_mc_secondary_handler[];
+static unsigned int *opal_irqs;
+static unsigned int opal_irq_count;
 
 int __init early_init_dt_scan_opal(unsigned long node,
                                   const char *uname, int depth, void *data)
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
                 opal.entry, entryp, entrysz);
 
        powerpc_firmware_features |= FW_FEATURE_OPAL;
-       if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+       if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+               powerpc_firmware_features |= FW_FEATURE_OPALv2;
+               powerpc_firmware_features |= FW_FEATURE_OPALv3;
+               printk("OPAL V3 detected !\n");
+       } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
                powerpc_firmware_features |= FW_FEATURE_OPALv2;
                printk("OPAL V2 detected !\n");
        } else {
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
                                rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
                len = total_len;
                rc = opal_console_write(vtermno, &len, data);
+
+               /* Closed or other error drop */
+               if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
+                   rc != OPAL_BUSY_EVENT) {
+                       written = total_len;
+                       break;
+               }
                if (rc == OPAL_SUCCESS) {
                        total_len -= len;
                        data += len;
@@ -316,6 +330,8 @@ static int __init opal_init(void)
        irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
        pr_debug("opal: Found %d interrupts reserved for OPAL\n",
                 irqs ? (irqlen / 4) : 0);
+       opal_irq_count = irqlen / 4;
+       opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
        for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
                unsigned int hwirq = be32_to_cpup(irqs);
                unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -327,7 +343,19 @@ static int __init opal_init(void)
                if (rc)
                        pr_warning("opal: Error %d requesting irq %d"
                                   " (0x%x)\n", rc, irq, hwirq);
+               opal_irqs[i] = irq;
        }
        return 0;
 }
 subsys_initcall(opal_init);
+
+void opal_shutdown(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < opal_irq_count; i++) {
+               if (opal_irqs[i])
+                       free_irq(opal_irqs[i], 0);
+               opal_irqs[i] = 0;
+       }
+}
index 1da578b7c1bfc61d80902df4fdf3e2148fc98d8e..9c9d15e4cdf2700f803471667f83720430844c68 100644 (file)
@@ -68,16 +68,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
 define_pe_printk_level(pe_warn, KERN_WARNING);
 define_pe_printk_level(pe_info, KERN_INFO);
 
-static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
-{
-       struct device_node *np;
-
-       np = pci_device_to_OF_node(dev);
-       if (!np)
-               return NULL;
-       return PCI_DN(np);
-}
-
 static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
        unsigned long pe;
@@ -110,7 +100,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+       struct pci_dn *pdn = pci_get_pdn(dev);
 
        if (!pdn)
                return NULL;
@@ -173,7 +163,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 
        /* Add to all parents PELT-V */
        while (parent) {
-               struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+               struct pci_dn *pdn = pci_get_pdn(parent);
                if (pdn && pdn->pe_number != IODA_INVALID_PE) {
                        rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
                                                pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
@@ -252,7 +242,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+       struct pci_dn *pdn = pci_get_pdn(dev);
        struct pnv_ioda_pe *pe;
        int pe_num;
 
@@ -323,7 +313,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
        struct pci_dev *dev;
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
-               struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+               struct pci_dn *pdn = pci_get_pdn(dev);
 
                if (pdn == NULL) {
                        pr_warn("%s: No device node associated with device !\n",
@@ -436,7 +426,7 @@ static void pnv_pci_ioda_setup_PEs(void)
 
 static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
 {
-       struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
+       struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;
 
        /*
@@ -768,6 +758,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                  unsigned int is_64, struct msi_msg *msg)
 {
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+       struct pci_dn *pdn = pci_get_pdn(dev);
        struct irq_data *idata;
        struct irq_chip *ichip;
        unsigned int xive_num = hwirq - phb->msi_base;
@@ -783,6 +774,10 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
        if (pe->mve_number < 0)
                return -ENXIO;
 
+       /* Force 32-bit MSI on some broken devices */
+       if (pdn && pdn->force_32bit_msi)
+               is_64 = 0;
+
        /* Assign XIVE to PE */
        rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
        if (rc) {
@@ -1035,7 +1030,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
        if (!phb->initialized)
                return 0;
 
-       pdn = pnv_ioda_get_pdn(dev);
+       pdn = pci_get_pdn(dev);
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return -EINVAL;
 
@@ -1048,6 +1043,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
        return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
 }
 
+static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
+{
+       opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
+                      OPAL_ASSERT_RESET);
+}
+
 void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
 {
        struct pci_controller *hose;
@@ -1178,6 +1179,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
        /* Setup TCEs */
        phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
 
+       /* Setup shutdown function for kexec */
+       phb->shutdown = pnv_pci_ioda_shutdown;
+
        /* Setup MSI support */
        pnv_pci_init_ioda_msis(phb);
 
index 55dfca844ddf0f8f38cbb5cb1c6863b71e217110..277343cc6a3d7f87966408f088a33b61f502ecda 100644 (file)
@@ -47,6 +47,10 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
 {
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
+       struct pci_dn *pdn = pci_get_pdn(pdev);
+
+       if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+               return -ENODEV;
 
        return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
 }
@@ -367,7 +371,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
        while (npages--)
                *(tcep++) = 0;
 
-       if (tbl->it_type & TCE_PCI_SWINV_CREATE)
+       if (tbl->it_type & TCE_PCI_SWINV_FREE)
                pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
 }
 
@@ -450,6 +454,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
                pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+void pnv_pci_shutdown(void)
+{
+       struct pci_controller *hose;
+
+       list_for_each_entry(hose, &hose_list, list_node) {
+               struct pnv_phb *phb = hose->private_data;
+
+               if (phb && phb->shutdown)
+                       phb->shutdown(phb);
+       }
+}
+
 /* Fixup wrong class code in p7ioc and p8 root complex */
 static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
 {
index 48dc4bb856a14932878f29b917b5d7551897de66..25d76c4df50b27711c6bd1bb40481216a7009eb6 100644 (file)
@@ -86,6 +86,7 @@ struct pnv_phb {
        void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
        void (*fixup_phb)(struct pci_controller *hose);
        u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
+       void (*shutdown)(struct pnv_phb *phb);
 
        union {
                struct {
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
                                        u64 *startp, u64 *endp);
+
 #endif /* __POWERNV_PCI_H */
index 8a9df7f9667ede1a97abb5aee79a14843aa5a6ab..a1c6f83fc3916efab1a092e1c7b102ff4a3c788a 100644 (file)
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
 
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
+extern void pnv_pci_shutdown(void);
 #else
 static inline void pnv_pci_init(void) { }
+static inline void pnv_pci_shutdown(void) { }
 #endif
 
 #endif /* _POWERNV_H */
index db1ad1c8f68fd1ed18a91fbc542a55c9e31d60b1..d4459bfc92f76a7bd45c6714395023786f200cc9 100644 (file)
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
        if (root)
                model = of_get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: PowerNV %s\n", model);
-       if (firmware_has_feature(FW_FEATURE_OPALv2))
+       if (firmware_has_feature(FW_FEATURE_OPALv3))
+               seq_printf(m, "firmware\t: OPAL v3\n");
+       else if (firmware_has_feature(FW_FEATURE_OPALv2))
                seq_printf(m, "firmware\t: OPAL v2\n");
        else if (firmware_has_feature(FW_FEATURE_OPAL))
                seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
 {
 }
 
+static void pnv_shutdown(void)
+{
+       /* Let the PCI code clear up IODA tables */
+       pnv_pci_shutdown();
+
+       /* And unregister all OPAL interrupts so they don't fire
+        * up while we kexec
+        */
+       opal_shutdown();
+}
+
 #ifdef CONFIG_KEXEC
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
@@ -187,6 +200,7 @@ define_machine(powernv) {
        .init_IRQ               = pnv_init_IRQ,
        .show_cpuinfo           = pnv_show_cpuinfo,
        .progress               = pnv_progress,
+       .machine_shutdown       = pnv_shutdown,
        .power_save             = power7_idle,
        .calibrate_decr         = generic_calibrate_decr,
 #ifdef CONFIG_KEXEC
index 6a3ecca5b7253e5dafe06f488f393eeb7c439186..88c9459c3e07121a64765e4304bcbb4dd7a1b2a4 100644 (file)
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr)
 
        BUG_ON(nr < 0 || nr >= NR_CPUS);
 
-       /* On OPAL v2 the CPU are still spinning inside OPAL itself,
-        * get them back now
+       /*
+        * If we already started or OPALv2 is not supported, we just
+        * kick the CPU via the PACA
         */
-       if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
-               pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
-               rc = opal_start_cpu(pcpu, start_here);
+       if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+               goto kick;
+
+       /*
+        * At this point, the CPU can either be spinning on the way in
+        * from kexec or be inside OPAL waiting to be started for the
+        * first time. OPAL v3 allows us to query OPAL to know if it
+        * has the CPUs, so we do that
+        */
+       if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+               uint8_t status;
+
+               rc = opal_query_cpu_status(pcpu, &status);
                if (rc != OPAL_SUCCESS) {
-                       pr_warn("OPAL Error %ld starting CPU %d\n",
+                       pr_warn("OPAL Error %ld querying CPU %d state\n",
                                rc, nr);
                        return -ENODEV;
                }
+
+               /*
+                * Already started, just kick it, probably coming from
+                * kexec and spinning
+                */
+               if (status == OPAL_THREAD_STARTED)
+                       goto kick;
+
+               /*
+                * Available/inactive, let's kick it
+                */
+               if (status == OPAL_THREAD_INACTIVE) {
+                       pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
+                                nr, pcpu);
+                       rc = opal_start_cpu(pcpu, start_here);
+                       if (rc != OPAL_SUCCESS) {
+                               pr_warn("OPAL Error %ld starting CPU %d\n",
+                                       rc, nr);
+                               return -ENODEV;
+                       }
+               } else {
+                       /*
+                        * An unavailable CPU (or any other unknown status)
+                        * shouldn't be started. It should also
+                        * not be in the possible map but currently it can
+                        * happen
+                        */
+                       pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+                                " (status %d)...\n", nr, pcpu, status);
+                       return -ENODEV;
+               }
+       } else {
+               /*
+                * On OPAL v2, we just kick it and hope for the best,
+                * we must not test the error from opal_start_cpu() or
+                * we would fail to get CPUs from kexec.
+                */
+               opal_start_cpu(pcpu, start_here);
        }
+ kick:
        return smp_generic_kick_cpu(nr);
 }
 
index 9a0941bc4d31b30ed78cd2ff0527861ce1570b8a..4459eff7a75ad6b3f4135f015591e2b9575dcec4 100644 (file)
@@ -18,6 +18,9 @@ config PPC_PSERIES
        select PPC_PCI_CHOICE if EXPERT
        select ZLIB_DEFLATE
        select PPC_DOORBELL
+       select HAVE_CONTEXT_TRACKING
+       select HOTPLUG if SMP
+       select HOTPLUG_CPU if SMP
        default y
 
 config PPC_SPLPAR
index 19506f935737d2ee7c27a16d1ecc909cc4b6c518..b456b157d33d107842a42d3b0c3d0da36e9ea121 100644 (file)
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
        ibm_configure_pe                = rtas_token("ibm,configure-pe");
        ibm_configure_bridge            = rtas_token("ibm,configure-bridge");
 
-       /* necessary sanity check */
+       /*
+        * Necessary sanity check. We needn't check "get-config-addr-info"
+        * and its variant since the old firmware probably supports address
+        * of domain/bus/slot/function for EEH RTAS operations.
+        */
        if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
                        __func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
                pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
                        __func__);
                return -EINVAL;
-       } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
-                  ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
-               pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
-                       "<ibm,get-config-addr-info> invalid\n",
-                       __func__);
-               return -EINVAL;
        } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
                   ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
                pr_warning("%s: RTAS service <ibm,configure-pe> and "
index 420524e6f8c95c6118462cefb80723312d213437..6d2f0abce6fae652d207b499ac99a47e20a500f2 100644 (file)
@@ -26,26 +26,6 @@ static int query_token, change_token;
 #define RTAS_CHANGE_MSIX_FN    4
 #define RTAS_CHANGE_32MSI_FN   5
 
-static struct pci_dn *get_pdn(struct pci_dev *pdev)
-{
-       struct device_node *dn;
-       struct pci_dn *pdn;
-
-       dn = pci_device_to_OF_node(pdev);
-       if (!dn) {
-               dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
-               return NULL;
-       }
-
-       pdn = PCI_DN(dn);
-       if (!pdn) {
-               dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
-               return NULL;
-       }
-
-       return pdn;
-}
-
 /* RTAS Helpers */
 
 static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
@@ -91,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev)
 {
        struct pci_dn *pdn;
 
-       pdn = get_pdn(pdev);
+       pdn = pci_get_pdn(pdev);
        if (!pdn)
                return;
 
@@ -152,7 +132,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
        struct pci_dn *pdn;
        const u32 *req_msi;
 
-       pdn = get_pdn(pdev);
+       pdn = pci_get_pdn(pdev);
        if (!pdn)
                return -ENODEV;
 
@@ -394,6 +374,23 @@ static int check_msix_entries(struct pci_dev *pdev)
        return 0;
 }
 
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+       u32 addr_hi, addr_lo;
+
+       /*
+        * We should only get in here for IODA1 configs. This is based on the
+        * fact that we are using RTAS for MSIs, we don't have the 32 bit MSI RTAS
+        * support, and we are in a PCIe Gen2 slot.
+        */
+       dev_info(&pdev->dev,
+                "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+       pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
+       addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+       pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
+       pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
+}
+
 static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
 {
        struct pci_dn *pdn;
@@ -401,8 +398,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
        struct msi_desc *entry;
        struct msi_msg msg;
        int nvec = nvec_in;
+       int use_32bit_msi_hack = 0;
 
-       pdn = get_pdn(pdev);
+       pdn = pci_get_pdn(pdev);
        if (!pdn)
                return -ENODEV;
 
@@ -428,15 +426,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
         */
 again:
        if (type == PCI_CAP_ID_MSI) {
-               if (pdn->force_32bit_msi)
+               if (pdn->force_32bit_msi) {
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
-               else
+                       if (rc < 0) {
+                               /*
+                                * We only want to run the 32 bit MSI hack below if
+                                * the max bus speed is Gen2 speed
+                                */
+                               if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+                                       return rc;
+
+                               use_32bit_msi_hack = 1;
+                       }
+               } else
+                       rc = -1;
+
+               if (rc < 0)
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
 
-               if (rc < 0 && !pdn->force_32bit_msi) {
+               if (rc < 0) {
                        pr_debug("rtas_msi: trying the old firmware call.\n");
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
                }
+
+               if (use_32bit_msi_hack && rc > 0)
+                       rtas_hack_32bit_msi_gen2(pdev);
        } else
                rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
 
@@ -518,12 +532,3 @@ static int rtas_msi_init(void)
 }
 arch_initcall(rtas_msi_init);
 
-static void quirk_radeon(struct pci_dev *dev)
-{
-       struct pci_dn *pdn = get_pdn(dev);
-
-       if (pdn)
-               pdn->force_32bit_msi = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
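
As a worked illustration of the address folding performed by rtas_hack_32bit_msi_gen2() above (a standalone sketch, not part of the patch): bits 63:48 of the 64-bit MSI address are packed into bits 19:4 of a 32-bit address just below 4GB. The input value below is a hypothetical example.

#include <stdio.h>
#include <stdint.h>

/* Fold the upper dword of a 64-bit MSI address into a 32-bit address,
 * mirroring the arithmetic in rtas_hack_32bit_msi_gen2(). */
static uint32_t fold_msi_addr_hi(uint32_t addr_hi)
{
        return 0xffff0000u | ((addr_hi >> (48 - 32)) << 4);
}

int main(void)
{
        uint32_t addr_hi = 0x00010000;  /* hypothetical: full address 0x0001000000000000 */

        printf("folded 32-bit MSI address: 0x%08x\n", fold_msi_addr_hi(addr_hi));
        return 0;
}
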
index 47226e04126d10e930a7a4a1bf42aa1c9abedb7a..5f997e79d570f8fb704cf9ec79ef49cb538b3b28 100644 (file)
@@ -16,6 +16,7 @@
   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   */
 
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/suspend.h>
 #include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
+       cpumask_var_t offline_mask;
        int rc;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+               return -ENOMEM;
+
        stream_id = simple_strtoul(buf, NULL, 16);
 
        do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
        } while (rc == -EAGAIN);
 
        if (!rc) {
+               /* All present CPUs must be online */
+               cpumask_andnot(offline_mask, cpu_present_mask,
+                               cpu_online_mask);
+               rc = rtas_online_cpus_mask(offline_mask);
+               if (rc) {
+                       pr_err("%s: Could not bring present CPUs online.\n",
+                                       __func__);
+                       goto out;
+               }
+
                stop_topology_update();
                rc = pm_suspend(PM_SUSPEND_MEM);
                start_topology_update();
+
+               /* Take down CPUs not online prior to suspend */
+               if (!rtas_offline_cpus_mask(offline_mask))
+                       pr_warn("%s: Could not restore CPUs to offline "
+                                       "state.\n", __func__);
        }
 
        stream_id = 0;
 
        if (!rc)
                rc = count;
+out:
+       free_cpumask_var(offline_mask);
        return rc;
 }
 
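
The hunk above brings every present-but-offline CPU online before the suspend and returns it to the offline state afterwards, via rtas_online_cpus_mask()/rtas_offline_cpus_mask(). A minimal sketch of that pattern, assuming the helpers simply walk the mask with the generic hotplug calls (error handling simplified):

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Bring every CPU in the mask online; stop at the first failure. */
static int online_cpus_in_mask(const struct cpumask *mask)
{
        int cpu, rc;

        for_each_cpu(cpu, mask) {
                rc = cpu_up(cpu);
                if (rc)
                        return rc;
        }
        return 0;
}

/* Return every CPU in the mask to the offline state. */
static int offline_cpus_in_mask(const struct cpumask *mask)
{
        int cpu, rc;

        for_each_cpu(cpu, mask) {
                rc = cpu_down(cpu);
                if (rc)
                        return rc;
        }
        return 0;
}
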
index 97fe82ee863334664eb2864f18f0b2a7fbd1dc56..2d3b1dd9571da71aec4b11ab97ef5a0cc106be4f 100644 (file)
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
        xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
        wsp_ics_set_xive(ics, hw_irq, xive);
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip wsp_irq_chip = {
index b0a518e9759978a7255a334388d53c79a387ba61..99464a7bdb3b1b0da09493dd4601d6b02951e13e 100644 (file)
@@ -64,6 +64,8 @@ endif
 
 obj-$(CONFIG_PPC_SCOM)         += scom.o
 
+obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS)  += udbg_memcons.o
+
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 obj-$(CONFIG_PPC_XICS)         += xics/
index 6e0e1005227f934930982fd4d4827b464d05180d..9cd0e60716fef0f37b068d14f2ad38222595221e 100644 (file)
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
        ev_int_set_config(src, config, prio, cpuid);
        spin_unlock_irqrestore(&ehv_pic_lock, flags);
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
index ee21b5e71aecd6379d7e93a4e5b299cb8901cce6..3cc2f9159ab1185f9f8f00ccefbcac14995ae6b1 100644 (file)
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock);
 
 #ifdef CONFIG_PPC32    /* XXX for now */
 #ifdef CONFIG_IRQ_ALL_CPUS
-#define distribute_irqs        (!(mpic->flags & MPIC_SINGLE_DEST_CPU))
+#define distribute_irqs        (1)
 #else
 #define distribute_irqs        (0)
 #endif
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                               mpic_physmask(mask));
        }
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void)
         * it differently, then we should make sure we also change the default
         * values of irq_desc[].affinity in irq.c.
         */
-       if (distribute_irqs) {
+       if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
                for (i = 0; i < mpic->num_sources ; i++)
                        mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
                                mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
new file mode 100644 (file)
index 0000000..ce5a7b4
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * A udbg backend which logs messages and reads input from in memory
+ * buffers.
+ *
+ * The console output can be read from memcons_output which is a
+ * circular buffer whose next write position is stored in memcons.output_pos.
+ *
+ * Input may be passed by writing into the memcons_input buffer when it is
+ * empty. The input buffer is empty when both input_pos == input_start and
+ * *input_start == '\0'.
+ *
+ * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
+ * Copyright (C) 2013 Alistair Popple, IBM Corp
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/barrier.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/udbg.h>
+
+struct memcons {
+       char *output_start;
+       char *output_pos;
+       char *output_end;
+       char *input_start;
+       char *input_pos;
+       char *input_end;
+};
+
+static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
+static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
+
+struct memcons memcons = {
+       .output_start = memcons_output,
+       .output_pos = memcons_output,
+       .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
+       .input_start = memcons_input,
+       .input_pos = memcons_input,
+       .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
+};
+
+void memcons_putc(char c)
+{
+       char *new_output_pos;
+
+       *memcons.output_pos = c;
+       wmb();
+       new_output_pos = memcons.output_pos + 1;
+       if (new_output_pos >= memcons.output_end)
+               new_output_pos = memcons.output_start;
+
+       memcons.output_pos = new_output_pos;
+}
+
+int memcons_getc_poll(void)
+{
+       char c;
+       char *new_input_pos;
+
+       if (*memcons.input_pos) {
+               c = *memcons.input_pos;
+
+               new_input_pos = memcons.input_pos + 1;
+               if (new_input_pos >= memcons.input_end)
+                       new_input_pos = memcons.input_start;
+               else if (*new_input_pos == '\0')
+                       new_input_pos = memcons.input_start;
+
+               *memcons.input_pos = '\0';
+               wmb();
+               memcons.input_pos = new_input_pos;
+               return c;
+       }
+
+       return -1;
+}
+
+int memcons_getc(void)
+{
+       int c;
+
+       while (1) {
+               c = memcons_getc_poll();
+               if (c == -1)
+                       cpu_relax();
+               else
+                       break;
+       }
+
+       return c;
+}
+
+void udbg_init_memcons(void)
+{
+       udbg_putc = memcons_putc;
+       udbg_getc = memcons_getc;
+       udbg_getc_poll = memcons_getc_poll;
+}
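
The header comment of this new file describes a circular output buffer whose write cursor is memcons.output_pos. As an illustrative sketch (not part of the file), a crash or debug tool that has located the structure could dump the log oldest-bytes-first along these lines:

#include <stdio.h>

/* Same layout as the kernel-side struct above. */
struct memcons {
        char *output_start;
        char *output_pos;
        char *output_end;
        char *input_start;
        char *input_pos;
        char *input_end;
};

/* Dump the circular output buffer, oldest bytes first. Bytes that were
 * never written are still zero and are skipped. */
static void dump_memcons_output(const struct memcons *mc)
{
        const char *p;

        /* If the buffer has wrapped, output_pos..output_end holds the oldest data. */
        for (p = mc->output_pos; p < mc->output_end; p++)
                if (*p)
                        putchar(*p);

        /* Then the newer data from the start up to the write cursor. */
        for (p = mc->output_start; p < mc->output_pos; p++)
                putchar(*p);
}
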
index f7e8609df0d5d059ea9230f22c9c9cccf7d3a801..39d72212655e3706ef5ca09dacd689156bc34399 100644 (file)
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
                       __func__, d->irq, hw_irq, server, rc);
                return -1;
        }
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip ics_opal_irq_chip = {
index 2c9789da0e249ffae45e5714ea505d13822efd2f..da183c5a103ce1df1da3617bbde788011f0351d3 100644 (file)
@@ -98,7 +98,6 @@ config S390
        select CLONE_BACKWARDS2
        select GENERIC_CLOCKEVENTS
        select GENERIC_CPU_DEVICES if !SMP
-       select GENERIC_KERNEL_THREAD
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_TIME_VSYSCALL_OLD
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
index bae0f402bf2ab7eb3c8c501c23bbd7e8f81e364a..87a22092b68f8b152a7df862d4c47604e10edee5 100644 (file)
@@ -212,7 +212,9 @@ appldata_timer_handler(ctl_table *ctl, int write,
                return 0;
        }
        if (!write) {
-               len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
+               strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
+                       ARRAY_SIZE(buf));
+               len = strnlen(buf, ARRAY_SIZE(buf));
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
@@ -317,7 +319,8 @@ appldata_generic_handler(ctl_table *ctl, int write,
                return 0;
        }
        if (!write) {
-               len = sprintf(buf, ops->active ? "1\n" : "0\n");
+               strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
+               len = strnlen(buf, ARRAY_SIZE(buf));
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len)) {
index 9411db653baca1153c7701046ec2980d5d67b38e..886ac7d4937a85c8cbe25d9ca52fd67d8cd28795 100644 (file)
@@ -71,8 +71,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-       dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
 }
 
 #endif /* _ASM_S390_DMA_MAPPING_H */
index b7931faaef6d76add44dd3bcb5c461cec5237a94..bf246dae1367333c109f7f28a60789c2f97deb84 100644 (file)
@@ -9,11 +9,6 @@ struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
-#ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE  12
-#else
-#define MCOUNT_INSN_SIZE  20
-#endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
@@ -21,4 +16,11 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 
 #endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define MCOUNT_INSN_SIZE  12
+#else
+#define MCOUNT_INSN_SIZE  22
+#endif
+
 #endif /* _ASM_S390_FTRACE_H */
index 379d96e2105ea1d60f79a4cd881593a188a61c01..fd9be010f9b2614c4b4e75f00e9fefad18cf0855 100644 (file)
@@ -36,6 +36,7 @@ static inline void * phys_to_virt(unsigned long address)
 }
 
 void *xlate_dev_mem_ptr(unsigned long phys);
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 /*
index 75ce9b065f9f321f6b0ef3eb3c010c10e58802e6..5d64fb7619ccfc41047c3b1bb6ad45ade2704d1d 100644 (file)
@@ -32,7 +32,7 @@
 
 void storage_key_init_range(unsigned long start, unsigned long end);
 
-static unsigned long pfmf(unsigned long function, unsigned long address)
+static inline unsigned long pfmf(unsigned long function, unsigned long address)
 {
        asm volatile(
                "       .insn   rre,0xb9af0000,%[function],%[address]"
@@ -44,17 +44,13 @@ static unsigned long pfmf(unsigned long function, unsigned long address)
 
 static inline void clear_page(void *page)
 {
-       if (MACHINE_HAS_PFMF) {
-               pfmf(0x10000, (unsigned long)page);
-       } else {
-               register unsigned long reg1 asm ("1") = 0;
-               register void *reg2 asm ("2") = page;
-               register unsigned long reg3 asm ("3") = 4096;
-               asm volatile(
-                       "       mvcl    2,0"
-                       : "+d" (reg2), "+d" (reg3) : "d" (reg1)
-                       : "memory", "cc");
-       }
+       register unsigned long reg1 asm ("1") = 0;
+       register void *reg2 asm ("2") = page;
+       register unsigned long reg3 asm ("3") = 4096;
+       asm volatile(
+               "       mvcl    2,0"
+               : "+d" (reg2), "+d" (reg3) : "d" (reg1)
+               : "memory", "cc");
 }
 
 static inline void copy_page(void *to, void *from)
index 4105b8221fddfd3c180caf78ef65eb9350c93193..e8b6e5b8932c39fa7ff0d824d5c60f4ebe5e5fcf 100644 (file)
@@ -306,7 +306,7 @@ extern unsigned long MODULES_END;
 #define RCP_HC_BIT     0x00200000UL
 #define RCP_GR_BIT     0x00040000UL
 #define RCP_GC_BIT     0x00020000UL
-#define RCP_IN_BIT     0x00008000UL    /* IPTE notify bit */
+#define RCP_IN_BIT     0x00002000UL    /* IPTE notify bit */
 
 /* User dirty / referenced bit for KVM's migration feature */
 #define KVM_UR_BIT     0x00008000UL
@@ -374,7 +374,7 @@ extern unsigned long MODULES_END;
 #define RCP_HC_BIT     0x0020000000000000UL
 #define RCP_GR_BIT     0x0004000000000000UL
 #define RCP_GC_BIT     0x0002000000000000UL
-#define RCP_IN_BIT     0x0000800000000000UL    /* IPTE notify bit */
+#define RCP_IN_BIT     0x0000200000000000UL    /* IPTE notify bit */
 
 /* User dirty / referenced bit for KVM's migration feature */
 #define KVM_UR_BIT     0x0000800000000000UL
@@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
-               : "Q" (ptep[PTRS_PER_PTE]) : "cc");
+               : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
 #endif
        return __pgste(new);
 }
@@ -635,18 +635,26 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
                "       nihh    %1,0xff7f\n"    /* clear RCP_PCL_BIT */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
-               : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
+               : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
+               : "cc", "memory");
        preempt_enable();
 #endif
 }
 
+static inline void pgste_set(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+       *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
+#endif
+}
+
 static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
        unsigned long address, bits;
        unsigned char skey;
 
-       if (!pte_present(*ptep))
+       if (pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        address = pte_val(*ptep) & PAGE_MASK;
        skey = page_get_storage_key(address);
@@ -680,7 +688,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
        int young;
 
-       if (!pte_present(*ptep))
+       if (pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        /* Get referenced bit from storage key */
        young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
@@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
        unsigned long address;
-       unsigned long okey, nkey;
+       unsigned long nkey;
 
-       if (!pte_present(entry))
+       if (pte_val(entry) & _PAGE_INVALID)
                return;
+       VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
        address = pte_val(entry) & PAGE_MASK;
-       okey = nkey = page_get_storage_key(address);
-       nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
-       /* Set page access key and fetch protection bit from pgste */
-       nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
-       if (okey != nkey)
-               page_set_storage_key(address, nkey, 0);
+       /*
+        * Set page access key and fetch protection bit from pgste.
+        * The guest C/R information is still in the PGSTE, set real
+        * key C/R to 0.
+        */
+       nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
+       page_set_storage_key(address, nkey, 0);
 #endif
 }
 
@@ -1098,6 +1108,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
        pte = *ptep;
        if (!mm_exclusive(mm))
                __ptep_ipte(address, ptep);
+
+       if (mm_has_pgste(mm)) {
+               pgste = pgste_update_all(&pte, pgste);
+               pgste_set(ptep, pgste);
+       }
        return pte;
 }
 
@@ -1105,9 +1120,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep, pte_t pte)
 {
+       pgste_t pgste;
+
        if (mm_has_pgste(mm)) {
+               pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+               pgste_set_key(ptep, pgste, pte);
                pgste_set_pte(ptep, pte);
-               pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+               pgste_set_unlock(ptep, pgste);
        } else
                *ptep = pte;
 }
index 2dacb306835ce217e91fe40a2c09aeae7edc05eb..0c5105fbaaf335f32f3134ef0acc74bd853fbab6 100644 (file)
@@ -80,4 +80,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _ASM_SOCKET_H */
index 7f4a4a8c847c7a3807fd6d913f0ca93fe5b874b8..be87d3e05a5be69265a6100f87afe2fa60d51137 100644 (file)
@@ -1862,6 +1862,8 @@ void print_fn_code(unsigned char *code, unsigned long len)
        while (len) {
                ptr = buffer;
                opsize = insn_length(*code);
+               if (opsize > len)
+                       break;
                ptr += sprintf(ptr, "%p: ", code);
                for (i = 0; i < opsize; i++)
                        ptr += sprintf(ptr, "%02x", code[i]);
index 298297477257fa86814da0b9599b382bb3ff46ea..87acc38f73c63b5631c25bfc55d619fb9128f842 100644 (file)
@@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 
 static void show_trace(struct task_struct *task, unsigned long *stack)
 {
+       const unsigned long frame_size =
+               STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        register unsigned long __r15 asm ("15");
        unsigned long sp;
 
@@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
 #ifdef CONFIG_CHECK_STACK
-       sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
-                         S390_lowcore.panic_stack);
+       sp = __show_trace(sp,
+                         S390_lowcore.panic_stack + frame_size - 4096,
+                         S390_lowcore.panic_stack + frame_size);
 #endif
-       sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
-                         S390_lowcore.async_stack);
+       sp = __show_trace(sp,
+                         S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                         S390_lowcore.async_stack + frame_size);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
index 78bdf0e5dff77999c72b2aae1a7df71347e35053..e3043aef87a96d17d62defc3c14a828625668507 100644 (file)
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 12
-#else
-#define MCOUNT_OFFSET_RET 22
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_disable_code(void);
@@ -155,9 +149,10 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
+       ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
-       trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
+       trace.func = ip;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
index f7fb58903f6aa67d6d78b409286827614ac70a5c..408e866ae548d3ec75e3e5ac672e61531d06b952 100644 (file)
@@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void)
        spin_unlock(&ma_subclass_lock);
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+
+void synchronize_irq(unsigned int irq)
+{
+       /*
+        * Not needed, the handler is protected by a lock and IRQs that occur
+        * after the handler is deleted are just NOPs.
+        */
+}
+EXPORT_SYMBOL_GPL(synchronize_irq);
+
+#ifndef CONFIG_PCI
+
+/* Only PCI devices have dynamically-defined IRQ handlers */
+
+int request_irq(unsigned int irq, irq_handler_t handler,
+               unsigned long irqflags, const char *devname, void *dev_id)
+{
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+       WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(free_irq);
+
+void enable_irq(unsigned int irq)
+{
+       WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+       WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(disable_irq);
+
+#endif /* !CONFIG_PCI */
+
+void disable_irq_nosync(unsigned int irq)
+{
+       disable_irq(irq);
+}
+EXPORT_SYMBOL_GPL(disable_irq_nosync);
+
+unsigned long probe_irq_on(void)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_on);
+
+int probe_irq_off(unsigned long val)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long val)
+{
+       return val;
+}
+EXPORT_SYMBOL_GPL(probe_irq_mask);
index 4567ce20d900c128a23d27a6b8668b72d921149f..08dcf21cb8dfc040e627b23c1ee279b8822110df 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
 
        .section .kprobes.text, "ax"
 
@@ -33,6 +34,7 @@ ENTRY(ftrace_caller)
        la      %r2,0(%r14)
        st      %r0,__SF_BACKCHAIN(%r15)
        la      %r3,0(%r3)
+       ahi     %r2,-MCOUNT_INSN_SIZE
        l       %r14,0b-0b(%r1)
        l       %r14,0(%r14)
        basr    %r14,%r14
index 11332193db30552a7d028de8debf6af8e019531b..1c52eae3396a0845f3176ba8305607ef049f2608 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
 
        .section .kprobes.text, "ax"
 
@@ -29,6 +30,7 @@ ENTRY(ftrace_caller)
        stg     %r1,__SF_BACKCHAIN(%r15)
        lgr     %r2,%r14
        lg      %r3,168(%r15)
+       aghi    %r2,-MCOUNT_INSN_SIZE
        larl    %r14,ftrace_trace_function
        lg      %r14,0(%r14)
        basr    %r14,%r14
index b6506ee32a363749c5effc99d81d551b90b9fc3d..29bd7bec41768c328c17285b4876c428727d41d0 100644 (file)
@@ -225,7 +225,7 @@ _sclp_print:
        ahi     %r2,1
        ltr     %r0,%r0                         # end of string?
        jz      .LfinalizemtoS4
-       chi     %r0,0x15                        # end of line (NL)?
+       chi     %r0,0x0a                        # end of line (NL)?
        jz      .LfinalizemtoS4
        stc     %r0,0(%r6,%r7)                  # copy to mto
        la      %r11,0(%r6,%r7)
index 8074cb4b7cbf9ea7988160131cf2481241a5115b..4f977d0d25c2d1b1af674314a0b3da263d602b5a 100644 (file)
@@ -428,34 +428,27 @@ void smp_stop_cpu(void)
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
-static void do_ext_call_interrupt(struct ext_code ext_code,
-                                 unsigned int param32, unsigned long param64)
+static void smp_handle_ext_call(void)
 {
        unsigned long bits;
-       int cpu;
-
-       cpu = smp_processor_id();
-       if (ext_code.code == 0x1202)
-               inc_irq_stat(IRQEXT_EXC);
-       else
-               inc_irq_stat(IRQEXT_EMS);
-       /*
-        * handle bit signal external calls
-        */
-       bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
 
+       /* handle bit signal external calls */
+       bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
-
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
-
        if (test_bit(ec_call_function, &bits))
                generic_smp_call_function_interrupt();
-
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
+}
 
+static void do_ext_call_interrupt(struct ext_code ext_code,
+                                 unsigned int param32, unsigned long param64)
+{
+       inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
+       smp_handle_ext_call();
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -645,7 +638,7 @@ static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
                        continue;
                pcpu = pcpu_devices + cpu;
                pcpu->address = info->cpu[i].address;
-               pcpu->state = (cpu >= info->configured) ?
+               pcpu->state = (i >= info->configured) ?
                        CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                set_cpu_present(cpu, true);
@@ -760,6 +753,8 @@ int __cpu_disable(void)
 {
        unsigned long cregs[16];
 
+       /* Handle possible pending IPIs */
+       smp_handle_ext_call();
        set_cpu_online(smp_processor_id(), false);
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
index 7805ddca833d7092149958e8ed56522598b2db07..a938b548f07e2d18c5510dbe99e47cdf231265a8 100644 (file)
@@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
        mp = (struct gmap_pgtable *) page->index;
        rmap->gmap = gmap;
        rmap->entry = segment_ptr;
-       rmap->vmaddr = address;
+       rmap->vmaddr = address & PMD_MASK;
        spin_lock(&mm->page_table_lock);
        if (*segment_ptr == segment) {
                list_add(&rmap->list, &mp->mapper);
@@ -677,8 +677,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
                        break;
                }
                /* Get the page mapped */
-               if (get_user_pages(current, gmap->mm, addr, 1, 1, 0,
-                                  NULL, NULL) != 1) {
+               if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
                        rc = -EFAULT;
                        break;
                }
index e6f15b5d8b7d0f8d15c18d71869b5df488606056..f1e5be85d592a421fd51abe79f3210053a9c2cdc 100644 (file)
@@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
        return rc;
 }
 
-void synchronize_irq(unsigned int irq)
-{
-       /*
-        * Not needed, the handler is protected by a lock and IRQs that occur
-        * after the handler is deleted are just NOPs.
-        */
-}
-EXPORT_SYMBOL_GPL(synchronize_irq);
-
 void enable_irq(unsigned int irq)
 {
        struct msi_desc *msi = irq_get_msi_desc(irq);
@@ -327,30 +318,6 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL_GPL(disable_irq);
 
-void disable_irq_nosync(unsigned int irq)
-{
-       disable_irq(irq);
-}
-EXPORT_SYMBOL_GPL(disable_irq_nosync);
-
-unsigned long probe_irq_on(void)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_on);
-
-int probe_irq_off(unsigned long val)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_off);
-
-unsigned int probe_irq_mask(unsigned long val)
-{
-       return val;
-}
-EXPORT_SYMBOL_GPL(probe_irq_mask);
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
index d8f988a37d16a59518bac5e612a6d19d2bb0ad95..0940682ab38bdd700227226e22803e6310096aa5 100644 (file)
@@ -41,8 +41,6 @@
 unsigned long empty_zero_page;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
 static void setup_zero_page(void)
 {
        struct page *page;
index d71a0bcf8145d529208ff9d053e8b6cb497eb45b..4d94dff9015c97fe27a58affaab8c8a4c37b132b 100644 (file)
@@ -85,7 +85,7 @@ static struct sh_eth_plat_data sh7763_eth_pdata = {
 };
 
 static struct platform_device espt_eth_device = {
-       .name       = "sh-eth",
+       .name       = "sh7763-gether",
        .resource   = sh_eth_resources,
        .num_resources  = ARRAY_SIZE(sh_eth_resources),
        .dev        = {
index 41f86702eb9ff80dfd48e178b5b1e0cf287a4fa6..4f114d1cd0198ea78160adf44450c8420ac8cfa1 100644 (file)
@@ -82,7 +82,7 @@ static struct sh_eth_plat_data sh7757_eth0_pdata = {
 };
 
 static struct platform_device sh7757_eth0_device = {
-       .name           = "sh-eth",
+       .name           = "sh7757-ether",
        .resource       = sh_eth0_resources,
        .id             = 0,
        .num_resources  = ARRAY_SIZE(sh_eth0_resources),
@@ -111,7 +111,7 @@ static struct sh_eth_plat_data sh7757_eth1_pdata = {
 };
 
 static struct platform_device sh7757_eth1_device = {
-       .name           = "sh-eth",
+       .name           = "sh7757-ether",
        .resource       = sh_eth1_resources,
        .id             = 1,
        .num_resources  = ARRAY_SIZE(sh_eth1_resources),
@@ -157,7 +157,7 @@ static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
 };
 
 static struct platform_device sh7757_eth_giga0_device = {
-       .name           = "sh-eth",
+       .name           = "sh7757-gether",
        .resource       = sh_eth_giga0_resources,
        .id             = 2,
        .num_resources  = ARRAY_SIZE(sh_eth_giga0_resources),
@@ -192,7 +192,7 @@ static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
 };
 
 static struct platform_device sh7757_eth_giga1_device = {
-       .name           = "sh-eth",
+       .name           = "sh7757-gether",
        .resource       = sh_eth_giga1_resources,
        .id             = 3,
        .num_resources  = ARRAY_SIZE(sh_eth_giga1_resources),
index 764530c85aa9a30ba94dcf5a941c4ce300995fea..61fade0ffa965736427b2b2091ad50d097b59d39 100644 (file)
@@ -165,8 +165,8 @@ static struct sh_eth_plat_data sh_eth_plat = {
 };
 
 static struct platform_device sh_eth_device = {
-       .name = "sh-eth",
-       .id     = 0,
+       .name = "sh7724-ether",
+       .id = 0,
        .dev = {
                .platform_data = &sh_eth_plat,
        },
index 9759d6ba7ffb4d8d967f30d0ee0d72fe00c886fb..658326f44df819eaba9d7d20dbe888d28b796d1f 100644 (file)
@@ -128,8 +128,8 @@ static struct resource sh_eth0_resources[] = {
 };
 
 static struct platform_device sh_eth0_device = {
-       .name = "sh-eth",
-       .id     = 0,
+       .name = "sh771x-ether",
+       .id = 0,
        .dev = {
                .platform_data = PHY_ID,
        },
@@ -151,8 +151,8 @@ static struct resource sh_eth1_resources[] = {
 };
 
 static struct platform_device sh_eth1_device = {
-       .name = "sh-eth",
-       .id     = 1,
+       .name = "sh771x-ether",
+       .id = 1,
        .dev = {
                .platform_data = PHY_ID,
        },
index 4010e63e82d8491422457d32459a8d216455d4ef..b70180ef3e2978832f33f2aec85322a183978b22 100644 (file)
@@ -380,8 +380,8 @@ static struct sh_eth_plat_data sh_eth_plat = {
 };
 
 static struct platform_device sh_eth_device = {
-       .name = "sh-eth",
-       .id     = 0,
+       .name = "sh7724-ether",
+       .id = 0,
        .dev = {
                .platform_data = &sh_eth_plat,
        },
index b7c75298dfb500d9cdadef5c316403c1767abef1..50ba481fa240c155c0764572d11c5ef74cedaac8 100644 (file)
@@ -93,7 +93,7 @@ static struct sh_eth_plat_data sh7763_eth_pdata = {
 };
 
 static struct platform_device sh7763rdp_eth_device = {
-       .name       = "sh-eth",
+       .name       = "sh7763-gether",
        .resource   = sh_eth_resources,
        .num_resources  = ARRAY_SIZE(sh_eth_resources),
        .dev        = {
index e0b740c831c7e172f42beb3c8ce957d9a22bb032..bb11e19251784289f50ed7ce089af68ff23674eb 100644 (file)
@@ -124,8 +124,8 @@ static struct resource eth_resources[] = {
 };
 
 static struct platform_device eth_device = {
-       .name = "sh-eth",
-       .id     = -1,
+       .name = "sh7619-ether",
+       .id = -1,
        .dev = {
                .platform_data = (void *)1,
        },
index 5f30f805d2f237fe09588a9d1594e9f52a6974ba..0128af3399b7f69bb7e534247289a1c73b666b63 100644 (file)
@@ -329,7 +329,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]),
        CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]),
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]),
-       CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[HWBLK_ETHER]),
+       CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]),
        CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
        CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
        CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
index deb683abacf0f6055dcbb3e0ef65b47071e933de..ed9501519ab36355e0f572f2c6b0c0669f6d711e 100644 (file)
@@ -238,7 +238,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_CON_ID("adc0", &mstp_clks[MSTP313]),
        CLKDEV_CON_ID("mtu0", &mstp_clks[MSTP312]),
        CLKDEV_CON_ID("iebus0", &mstp_clks[MSTP304]),
-       CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[MSTP114]),
+       CLKDEV_DEV_ID("sh7734-gether.0", &mstp_clks[MSTP114]),
        CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP303]),
        CLKDEV_CON_ID("hif0", &mstp_clks[MSTP302]),
        CLKDEV_CON_ID("stif0", &mstp_clks[MSTP301]),
index 89f49b68a21c0804d6cbc55931b8beadf19f6711..b46c3fa0b2653ff93ce0af66eca6df2b2d3a1277 100644 (file)
@@ -70,6 +70,8 @@
 
 #define SO_SELECT_ERR_QUEUE    0x0029
 
+#define SO_LL                  0x0030
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index 9f20566b0773a8b17003176c0159aea062935f39..79cc0d1a477d0dbeb80178fb977c0d9e04bd24f5 100644 (file)
@@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex);
 int of_set_property(struct device_node *dp, const char *name, void *val, int len)
 {
        struct property **prevp;
+       unsigned long flags;
        void *new_val;
        int err;
 
@@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        err = -ENODEV;
 
        mutex_lock(&of_set_property_mutex);
-       raw_spin_lock(&devtree_lock);
+       raw_spin_lock_irqsave(&devtree_lock, flags);
        prevp = &dp->properties;
        while (*prevp) {
                struct property *prop = *prevp;
@@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
                }
                prevp = &(*prevp)->next;
        }
-       raw_spin_unlock(&devtree_lock);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
        mutex_unlock(&of_set_property_mutex);
 
        /* XXX Update procfs if necessary... */
index d36a85ebb5e027c38929c2a5a8f9e7be772984b9..9c7be59e6f5ad3ad360facd10c6a2c35fd3bde75 100644 (file)
@@ -785,9 +785,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf];
                        break;
                }
                if (proglen == oldproglen) {
-                       image = module_alloc(max_t(unsigned int,
-                                                  proglen,
-                                                  sizeof(struct work_struct)));
+                       image = module_alloc(proglen);
                        if (!image)
                                goto out;
                }
@@ -806,20 +804,8 @@ out:
        return;
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-       module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter) {
-               struct work_struct *work = (struct work_struct *)fp->bpf_func;
-
-               INIT_WORK(work, jit_free_defer);
-               schedule_work(work);
-       }
+       if (fp->bpf_func != sk_run_filter)
+               module_free(NULL, fp->bpf_func);
 }
index 6a154a91c7e746342f35cf6aa13bdae24f544a88..685692c94f051a8a7ad582442efcc3fa11173332 100644 (file)
@@ -108,7 +108,6 @@ config X86
        select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
        select GENERIC_TIME_VSYSCALL if X86_64
        select KTIME_SCALAR if X86_32
-       select ALWAYS_USE_PERSISTENT_CLOCK
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HAVE_CONTEXT_TRACKING if X86_64
index 35ee62fccf9827c766007021ff1c65bbdb55c53b..c205035a6b96b836ef683ddf8592dbc9c82200e6 100644 (file)
@@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
        *size = len;
 }
 
-static efi_status_t setup_efi_vars(struct boot_params *params)
-{
-       struct setup_data *data;
-       struct efi_var_bootdata *efidata;
-       u64 store_size, remaining_size, var_size;
-       efi_status_t status;
-
-       if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
-               return EFI_UNSUPPORTED;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
-                               EFI_VARIABLE_NON_VOLATILE |
-                               EFI_VARIABLE_BOOTSERVICE_ACCESS |
-                               EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
-                               &remaining_size, &var_size);
-
-       if (status != EFI_SUCCESS)
-               return status;
-
-       status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                               EFI_LOADER_DATA, sizeof(*efidata), &efidata);
-
-       if (status != EFI_SUCCESS)
-               return status;
-
-       efidata->data.type = SETUP_EFI_VARS;
-       efidata->data.len = sizeof(struct efi_var_bootdata) -
-               sizeof(struct setup_data);
-       efidata->data.next = 0;
-       efidata->store_size = store_size;
-       efidata->remaining_size = remaining_size;
-       efidata->max_var_size = var_size;
-
-       if (data)
-               data->next = (unsigned long)efidata;
-       else
-               params->hdr.setup_data = (unsigned long)efidata;
-
-}
-
 static efi_status_t setup_efi_pci(struct boot_params *params)
 {
        efi_pci_io_protocol *pci;
@@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
 
        setup_graphics(boot_params);
 
-       setup_efi_vars(boot_params);
-
        setup_efi_pci(boot_params);
 
        status = efi_call_phys3(sys_table->boottime->allocate_pool,
index 94c27df8a549c2ce21234c4a621a5a963a917fc4..f247304299a28b1f3245563b2437ab54a8a82889 100644 (file)
@@ -240,7 +240,7 @@ fold_64:
        pand    %xmm3, %xmm1
        PCLMULQDQ 0x00, CONSTANT, %xmm1
        pxor    %xmm2, %xmm1
-       pextrd  $0x01, %xmm1, %eax
+       PEXTRD  0x01, %xmm1, %eax
 
        ret
 ENDPROC(crc32_pclmul_le_16)
index 56610c4bf31b22a47ef58a0362fcabe3cd8437f8..642f15687a0ac4205f59850cacab030768eaf6d4 100644 (file)
@@ -118,7 +118,7 @@ y2 = %r15d
 
 _INP_END_SIZE = 8
 _INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
 _XMM_SAVE_SIZE = 0
 
 _INP_END = 0
index 98d3c391da81b5de50469c7c0d4c33afb7e90b8a..f833b74d902ba87919184ba7828b226ee067c096 100644 (file)
@@ -111,7 +111,7 @@ y2 = %r15d
 
 _INP_END_SIZE = 8
 _INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
 _XMM_SAVE_SIZE = 0
 
 _INP_END = 0
index 2fb5d5884e2331b7d6d299621bfb5c91fa701bf2..60c89f30c727458df128543a6302e95aeede8758 100644 (file)
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
-struct efi_var_bootdata {
-       struct setup_data data;
-       u64 store_size;
-       u64 remaining_size;
-       u64 max_var_size;
-};
-
 #ifdef CONFIG_EFI
 
 static inline bool efi_is_native(void)
index 280bf7fb6aba5c0e1976c852a71692d1d3f888b2..3e115273ed885110de583ce5b228ad6caf993eec 100644 (file)
@@ -9,12 +9,68 @@
 
 #define REG_NUM_INVALID                100
 
-#define REG_TYPE_R64           0
-#define REG_TYPE_XMM           1
+#define REG_TYPE_R32           0
+#define REG_TYPE_R64           1
+#define REG_TYPE_XMM           2
 #define REG_TYPE_INVALID       100
 
+       .macro R32_NUM opd r32
+       \opd = REG_NUM_INVALID
+       .ifc \r32,%eax
+       \opd = 0
+       .endif
+       .ifc \r32,%ecx
+       \opd = 1
+       .endif
+       .ifc \r32,%edx
+       \opd = 2
+       .endif
+       .ifc \r32,%ebx
+       \opd = 3
+       .endif
+       .ifc \r32,%esp
+       \opd = 4
+       .endif
+       .ifc \r32,%ebp
+       \opd = 5
+       .endif
+       .ifc \r32,%esi
+       \opd = 6
+       .endif
+       .ifc \r32,%edi
+       \opd = 7
+       .endif
+#ifdef CONFIG_X86_64
+       .ifc \r32,%r8d
+       \opd = 8
+       .endif
+       .ifc \r32,%r9d
+       \opd = 9
+       .endif
+       .ifc \r32,%r10d
+       \opd = 10
+       .endif
+       .ifc \r32,%r11d
+       \opd = 11
+       .endif
+       .ifc \r32,%r12d
+       \opd = 12
+       .endif
+       .ifc \r32,%r13d
+       \opd = 13
+       .endif
+       .ifc \r32,%r14d
+       \opd = 14
+       .endif
+       .ifc \r32,%r15d
+       \opd = 15
+       .endif
+#endif
+       .endm
+
        .macro R64_NUM opd r64
        \opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
        .ifc \r64,%rax
        \opd = 0
        .endif
        .ifc \r64,%r15
        \opd = 15
        .endif
+#endif
        .endm
 
        .macro XMM_NUM opd xmm
        .endm
 
        .macro REG_TYPE type reg
+       R32_NUM reg_type_r32 \reg
        R64_NUM reg_type_r64 \reg
        XMM_NUM reg_type_xmm \reg
        .if reg_type_r64 <> REG_NUM_INVALID
        \type = REG_TYPE_R64
+       .elseif reg_type_r32 <> REG_NUM_INVALID
+       \type = REG_TYPE_R32
        .elseif reg_type_xmm <> REG_NUM_INVALID
        \type = REG_TYPE_XMM
        .else
        .byte \imm8
        .endm
 
+       .macro PEXTRD imm8 xmm gpr
+       R32_NUM extrd_opd1 \gpr
+       XMM_NUM extrd_opd2 \xmm
+       PFX_OPD_SIZE
+       PFX_REX extrd_opd1 extrd_opd2
+       .byte 0x0f, 0x3a, 0x16
+       MODRM 0xc0 extrd_opd1 extrd_opd2
+       .byte \imm8
+       .endm
+
        .macro AESKEYGENASSIST rcon xmm1 xmm2
        XMM_NUM aeskeygen_opd1 \xmm1
        XMM_NUM aeskeygen_opd2 \xmm2
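
The PEXTRD macro added above hand-assembles the SSE4.1 pextrd instruction for toolchains that do not understand the mnemonic. As a hypothetical sanity check (not part of the patch), the byte sequence it should produce for "pextrd $0x01, %xmm1, %eax" follows directly from the Intel encoding 66 0F 3A 16 /r ib:

/* Expected bytes: 66 (operand-size prefix), 0f 3a 16 (opcode),
 * c8 (ModRM: mod=11, reg=xmm1, rm=eax), 01 (imm8). */
#include <stdio.h>

int main(void)
{
	const unsigned char pextrd_xmm1_eax_1[] = { 0x66, 0x0f, 0x3a, 0x16, 0xc8, 0x01 };
	unsigned int i;

	for (i = 0; i < sizeof(pextrd_xmm1_eax_1); i++)
		printf("%02x ", pextrd_xmm1_eax_1[i]);
	printf("\n");
	return 0;
}
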
index 08744242b8d24c9111d2275ff6d84752d725584e..c15ddaf907107134d6cd2f8d86f554e510a6f848 100644 (file)
@@ -6,7 +6,6 @@
 #define SETUP_E820_EXT                 1
 #define SETUP_DTB                      2
 #define SETUP_PCI                      3
-#define SETUP_EFI_VARS                 4
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
index dab95a85f7f8590d240790e981830ea91f729cdd..55b67614ed942fad37e13fe5a48e4fab19f4feea 100644 (file)
@@ -34,7 +34,7 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
index 08f7e8039099dc6b8787bbad4797553d7da6ac3f..321d65ebaffe255bbb1dc1bc6aaf2ac217d55aaf 100644 (file)
@@ -115,8 +115,10 @@ startup_64:
        movq    %rdi, %rax
        shrq    $PUD_SHIFT, %rax
        andl    $(PTRS_PER_PUD-1), %eax
-       movq    %rdx, (4096+0)(%rbx,%rax,8)
-       movq    %rdx, (4096+8)(%rbx,%rax,8)
+       movq    %rdx, 4096(%rbx,%rax,8)
+       incl    %eax
+       andl    $(PTRS_PER_PUD-1), %eax
+       movq    %rdx, 4096(%rbx,%rax,8)
 
        addq    $8192, %rbx
        movq    %rdi, %rax
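
The rewritten sequence stores the second PUD entry by incrementing the index and masking it again rather than by adding a fixed 8-byte displacement. The difference matters when the first index is already the last slot: the old form stores one entry past the 512-entry table, while the new form wraps back to slot 0. A small sketch of the index arithmetic, assuming PTRS_PER_PUD is 512 as on x86-64:

#include <stdio.h>

#define PTRS_PER_PUD 512

int main(void)
{
	unsigned int idx = PTRS_PER_PUD - 1;			/* worst case: last slot */
	unsigned int old_second = idx + 1;			/* old: slot 512, past the table */
	unsigned int new_second = (idx + 1) & (PTRS_PER_PUD - 1);	/* new: wraps to 0 */

	printf("old second slot: %u, new second slot: %u\n", old_second, new_second);
	return 0;
}
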
index 245a71db401af0e50be0deeba56d680c278ca2fc..cb339097b9ea0cf4f57b2b406fa308a489cee881 100644 (file)
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
- * For now, with eagerfpu we will return interrupted kernel FPU
- * state as not-idle. TBD: Ideally we can change the return value
- * to something like __thread_has_fpu(current). But we need to
- * be careful of doing __thread_clear_has_fpu() before saving
- * the FPU etc for supporting nested uses etc. For now, take
- * the simple route!
- *
  * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
  * pair does nothing at all: the thread must not have fpu (so
  * that we don't try to save the FPU state), and TS must
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case when we return 1 unless we've already
+ * been eager and saved the state in kernel_fpu_begin().
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
        if (use_eager_fpu())
-               return 0;
+               return __thread_has_fpu(current);
 
        return !__thread_has_fpu(current) &&
                (read_cr0() & X86_CR0_TS);
@@ -78,8 +74,8 @@ void __kernel_fpu_begin(void)
        struct task_struct *me = current;
 
        if (__thread_has_fpu(me)) {
-               __save_init_fpu(me);
                __thread_clear_has_fpu(me);
+               __save_init_fpu(me);
                /* We do 'stts()' in __kernel_fpu_end() */
        } else if (!use_eager_fpu()) {
                this_cpu_write(fpu_owner_task, NULL);
index d893e8ed8ac96559b2175b00c3d68d71658a4105..2e9e12871c2b51a9fe48068369f8dd4346a4db51 100644 (file)
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
         * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
         * hotplug.
         */
-       cpu_hotplug_driver_lock();
+       mutex_lock(&x86_cpu_microcode_mutex);
 
        mc_saved_count_init = mc_saved_data.mc_saved_count;
        mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
        }
 
 out:
-       cpu_hotplug_driver_unlock();
+       mutex_unlock(&x86_cpu_microcode_mutex);
 
        return ret;
 }
index 607af0d4d5ef5f34afc0d5e7caad375dafff6663..4e7a37ff03ab9f6aba634be08f7650e1ef5f2f7c 100644 (file)
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
        if (cpuidle_idle_call())
                x86_idle();
+       else
+               local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-       if (need_resched())
-               return;
-
        if (!amd_e400_c1e_detected) {
                u32 lo, hi;
 
index 7a6f3b3be3cfcd9c071672e2275213c3c7715607..f2bb9c96720ace7ac54b6a1fc713cb5a002d7379 100644 (file)
@@ -160,7 +160,7 @@ identity_mapped:
        xorq    %rbp, %rbp
        xorq    %r8,  %r8
        xorq    %r9,  %r9
-       xorq    %r10, %r9
+       xorq    %r10, %r10
        xorq    %r11, %r11
        xorq    %r12, %r12
        xorq    %r13, %r13
index 8db0010ed150d5c1fa1d0b6aa96668587c2d1004..5953dcea752d08e950d62293abbdec94ae95f62b 100644 (file)
@@ -1240,9 +1240,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
        ctxt->modrm_seg = VCPU_SREG_DS;
 
        if (ctxt->modrm_mod == 3) {
+               int highbyte_regs = ctxt->rex_prefix == 0;
+
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-               op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
+               op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
+                                              highbyte_regs && (ctxt->d & ByteOp));
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
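
The new highbyte_regs check encodes an x86 decoding rule: for byte-sized operands, ModRM register numbers 4-7 name AH/CH/DH/BH when no REX prefix is present, but SPL/BPL/SIL/DIL once any REX prefix appears. A small illustrative lookup (not kernel code) of that distinction:

#include <stdio.h>

/* Which 8-bit register a ModRM value 0..7 names, with and without REX. */
static const char *byte_reg_name(unsigned int nr, int rex_prefix)
{
	static const char *legacy[8] = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh" };
	static const char *rexed[8]  = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil" };

	return rex_prefix ? rexed[nr] : legacy[nr];
}

int main(void)
{
	printf("reg 4 without REX: %s, with REX: %s\n",
	       byte_reg_name(4, 0), byte_reg_name(4, 1));
	return 0;
}
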
@@ -3997,7 +4000,8 @@ static const struct opcode twobyte_table[256] = {
        DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
        N, D(ImplicitOps | ModRM), N, N,
        /* 0x10 - 0x1F */
-       N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
+       N, N, N, N, N, N, N, N,
+       D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
        /* 0x20 - 0x2F */
        DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
        DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
@@ -4836,6 +4840,7 @@ twobyte_insn:
        case 0x08:              /* invd */
        case 0x0d:              /* GrpP (prefetch) */
        case 0x18:              /* Grp16 (prefetch/nop) */
+       case 0x1f:              /* nop */
                break;
        case 0x20: /* mov cr, reg */
                ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
index e1adbb4aca753657bfa246ecf56e3b52b130a9ab..0eee2c8b64d1cafecdf7f587dbeef5566b4449df 100644 (file)
@@ -1861,11 +1861,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        unsigned int sipi_vector;
+       unsigned long pe;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
                return;
 
-       if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
+       pe = xchg(&apic->pending_events, 0);
+
+       if (test_bit(KVM_APIC_INIT, &pe)) {
                kvm_lapic_reset(vcpu);
                kvm_vcpu_reset(vcpu);
                if (kvm_vcpu_is_bsp(apic->vcpu))
@@ -1873,7 +1876,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                else
                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
        }
-       if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
+       if (test_bit(KVM_APIC_SIPI, &pe) &&
            vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
                /* evaluate pending_events before reading the vector */
                smp_rmb();
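
Replacing the two test_and_clear_bit() calls with a single xchg() snapshots and clears the pending-event word in one atomic step, so the INIT and SIPI decisions are both made against the same consistent value. A user-space analog of that fetch-and-clear pattern, using C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

#define EVT_INIT (1UL << 0)
#define EVT_SIPI (1UL << 1)

static _Atomic unsigned long pending_events;

static void accept_events(void)
{
	/* Grab a private snapshot and clear the shared word atomically,
	 * mirroring xchg(&apic->pending_events, 0) in the patch. */
	unsigned long pe = atomic_exchange(&pending_events, 0);

	if (pe & EVT_INIT)
		puts("handle INIT");
	if (pe & EVT_SIPI)
		puts("handle SIPI");
}

int main(void)
{
	atomic_fetch_or(&pending_events, EVT_INIT | EVT_SIPI);
	accept_events();
	return 0;
}
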
index fdc5dca14fb35de5537765acca869ccfb447262d..1f34e9219775b2251c72cbb3234fef0ebec600f3 100644 (file)
@@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        end_pfn = limit_pfn;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
+       if (!after_bootmem)
+               adjust_range_page_size_mask(mr, nr_range);
+
        /* try to merge same page size and continuous */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
@@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                nr_range--;
        }
 
-       if (!after_bootmem)
-               adjust_range_page_size_mask(mr, nr_range);
-
        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
                                mr[i].start, mr[i].end - 1,
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
                                           unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
        max_pfn_mapped = 0; /* will get exact value next */
        min_pfn_mapped = real_end >> PAGE_SHIFT;
        last_start = start = real_end;
+
+       /*
+        * We start from the top (end of memory) and go to the bottom.
+        * The memblock_find_in_range() gets us a block of RAM from the
+        * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+        * for page table.
+        */
        while (last_start > ISA_END_ADDRESS) {
                if (last_start > step_size) {
                        start = round_down(last_start - 1, step_size);
index f66b54086ce597562d72f03d5bbaacfb09622005..79c216aa0e2baaac3a65a43972161922489c4978 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
+#include <linux/random.h>
 
 /*
  * Conventions :
@@ -144,6 +145,39 @@ static int pkt_type_offset(void)
        return -1;
 }
 
+struct bpf_binary_header {
+       unsigned int    pages;
+       /* Note : for security reasons, bpf code will follow a randomly
+        * sized amount of int3 instructions
+        */
+       u8              image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
+                                                 u8 **image_ptr)
+{
+       unsigned int sz, hole;
+       struct bpf_binary_header *header;
+
+       /* Most of BPF filters are really small,
+        * but if some of them fill a page, allow at least
+        * 128 extra bytes to insert a random section of int3
+        */
+       sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
+       header = module_alloc(sz);
+       if (!header)
+               return NULL;
+
+       memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
+
+       header->pages = sz / PAGE_SIZE;
+       hole = sz - (proglen + sizeof(*header));
+
+       /* insert a random number of int3 instructions before BPF code */
+       *image_ptr = &header->image[prandom_u32() % hole];
+       return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
        u8 temp[64];
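
bpf_alloc_binary() above rounds the allocation up to whole pages, poisons it with int3 (0xcc) and starts the JIT image at a pseudo-random offset inside the slack, so a stray jump into the padding traps and the image address is harder to predict. A rough user-space sketch of the same size and offset arithmetic, with malloc() and rand() standing in for module_alloc() and prandom_u32():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Page-align the allocation, poison it, pick a random start for the image.
 * The base pointer is dropped here; the real code keeps it in the header. */
static uint8_t *alloc_padded_image(unsigned int proglen, unsigned int hdrlen)
{
	unsigned int sz = (proglen + hdrlen + 128 + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	unsigned int hole = sz - (proglen + hdrlen);
	uint8_t *buf = malloc(sz);

	if (!buf)
		return NULL;
	memset(buf, 0xcc, sz);	/* anything that lands in the padding traps */
	return buf + hdrlen + ((unsigned int)rand() % hole);
}

int main(void)
{
	uint8_t *image = alloc_padded_image(200, 8);

	printf("image would start at %p\n", (void *)image);
	return 0;
}
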
@@ -153,6 +187,7 @@ void bpf_jit_compile(struct sk_filter *fp)
        int t_offset, f_offset;
        u8 t_op, f_op, seen = 0, pass;
        u8 *image = NULL;
+       struct bpf_binary_header *header = NULL;
        u8 *func;
        int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
        unsigned int cleanup_addr; /* epilogue code offset */
@@ -693,7 +728,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                if (unlikely(proglen + ilen > oldproglen)) {
                                        pr_err("bpb_jit_compile fatal error\n");
                                        kfree(addrs);
-                                       module_free(NULL, image);
+                                       module_free(NULL, header);
                                        return;
                                }
                                memcpy(image + proglen, temp, ilen);
@@ -717,10 +752,8 @@ cond_branch:                       f_offset = addrs[i + filter[i].jf] - addrs[i];
                        break;
                }
                if (proglen == oldproglen) {
-                       image = module_alloc(max_t(unsigned int,
-                                                  proglen,
-                                                  sizeof(struct work_struct)));
-                       if (!image)
+                       header = bpf_alloc_binary(proglen, &image);
+                       if (!header)
                                goto out;
                }
                oldproglen = proglen;
@@ -730,7 +763,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                bpf_jit_dump(flen, proglen, pass, image);
 
        if (image) {
-               bpf_flush_icache(image, image + proglen);
+               bpf_flush_icache(header, image + proglen);
+               set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *)image;
        }
 out:
@@ -738,20 +772,13 @@ out:
        return;
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-       module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter) {
-               struct work_struct *work = (struct work_struct *)fp->bpf_func;
+               unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+               struct bpf_binary_header *header = (void *)addr;
 
-               INIT_WORK(work, jit_free_defer);
-               schedule_work(work);
+               set_memory_rw(addr, header->pages);
+               module_free(NULL, header);
        }
 }
index 305c68b8d53825fbda7f635de0ae4c1af43fe03b..981c2dbd72cc45e6d77a98dab694d87b0523696a 100644 (file)
@@ -628,7 +628,9 @@ int pcibios_add_device(struct pci_dev *dev)
 
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
-               data = phys_to_virt(pa_data);
+               data = ioremap(pa_data, sizeof(*rom));
+               if (!data)
+                       return -ENOMEM;
 
                if (data->type == SETUP_PCI) {
                        rom = (struct pci_setup_rom *)data;
@@ -645,6 +647,7 @@ int pcibios_add_device(struct pci_dev *dev)
                        }
                }
                pa_data = data->next;
+               iounmap(data);
        }
        return 0;
 }
index 0e0fabf173429006612eb014ec591df98e4b2deb..6eb18c42a28a3584546e87a57f9cccf5af3473e3 100644 (file)
@@ -141,11 +141,6 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
-       if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
-                               || devfn == PCI_DEVFN(0, 0)
-                               || devfn == PCI_DEVFN(3, 0)))
-               return 1;
-
        /* This is a workaround for A0 LNC bug where PCI status register does
         * not have new CAP bit set. can not be written by SW either.
         *
@@ -155,7 +150,10 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
         */
        if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
                return 0;
-
+       if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+                               || devfn == PCI_DEVFN(0, 0)
+                               || devfn == PCI_DEVFN(3, 0)))
+               return 1;
        return 0; /* langwell on others */
 }
 
index 55856b2310d37d76240a2af47140f2deb97e8482..5ae2eb09419ec54d2ac7618904c31d961149df43 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/bcd.h>
-#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/efi.h>
 
 #define EFI_DEBUG      1
 
-/*
- * There's some additional metadata associated with each
- * variable. Intel's reference implementation is 60 bytes - bump that
- * to account for potential alignment constraints
- */
-#define VAR_METADATA_SIZE 64
+#define EFI_MIN_RESERVE 5120
+
+#define EFI_DUMMY_GUID \
+       EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
+
+static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
 
 struct efi __read_mostly efi = {
        .mps        = EFI_INVALID_TABLE_ADDR,
@@ -79,13 +78,6 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
-static u64 efi_var_store_size;
-static u64 efi_var_remaining_size;
-static u64 efi_var_max_var_size;
-static u64 boot_used_size;
-static u64 boot_var_size;
-static u64 active_size;
-
 unsigned long x86_efi_facility;
 
 /*
@@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
                                               efi_char16_t *name,
                                               efi_guid_t *vendor)
 {
-       efi_status_t status;
-       static bool finished = false;
-       static u64 var_size;
-
-       status = efi_call_virt3(get_next_variable,
-                               name_size, name, vendor);
-
-       if (status == EFI_NOT_FOUND) {
-               finished = true;
-               if (var_size < boot_used_size) {
-                       boot_var_size = boot_used_size - var_size;
-                       active_size += boot_var_size;
-               } else {
-                       printk(KERN_WARNING FW_BUG  "efi: Inconsistent initial sizes\n");
-               }
-       }
-
-       if (boot_used_size && !finished) {
-               unsigned long size;
-               u32 attr;
-               efi_status_t s;
-               void *tmp;
-
-               s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
-
-               if (s != EFI_BUFFER_TOO_SMALL || !size)
-                       return status;
-
-               tmp = kmalloc(size, GFP_ATOMIC);
-
-               if (!tmp)
-                       return status;
-
-               s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
-
-               if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
-                       var_size += size;
-                       var_size += ucs2_strsize(name, 1024);
-                       active_size += size;
-                       active_size += VAR_METADATA_SIZE;
-                       active_size += ucs2_strsize(name, 1024);
-               }
-
-               kfree(tmp);
-       }
-
-       return status;
+       return efi_call_virt3(get_next_variable,
+                             name_size, name, vendor);
 }
 
 static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
                                          unsigned long data_size,
                                          void *data)
 {
-       efi_status_t status;
-       u32 orig_attr = 0;
-       unsigned long orig_size = 0;
-
-       status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
-                                      NULL);
-
-       if (status != EFI_BUFFER_TOO_SMALL)
-               orig_size = 0;
-
-       status = efi_call_virt5(set_variable,
-                               name, vendor, attr,
-                               data_size, data);
-
-       if (status == EFI_SUCCESS) {
-               if (orig_size) {
-                       active_size -= orig_size;
-                       active_size -= ucs2_strsize(name, 1024);
-                       active_size -= VAR_METADATA_SIZE;
-               }
-               if (data_size) {
-                       active_size += data_size;
-                       active_size += ucs2_strsize(name, 1024);
-                       active_size += VAR_METADATA_SIZE;
-               }
-       }
-
-       return status;
+       return efi_call_virt5(set_variable,
+                             name, vendor, attr,
+                             data_size, data);
 }
 
 static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -786,9 +708,6 @@ void __init efi_init(void)
        char vendor[100] = "unknown";
        int i = 0;
        void *tmp;
-       struct setup_data *data;
-       struct efi_var_bootdata *efi_var_data;
-       u64 pa_data;
 
 #ifdef CONFIG_X86_32
        if (boot_params.efi_info.efi_systab_hi ||
@@ -806,22 +725,6 @@ void __init efi_init(void)
        if (efi_systab_init(efi_phys.systab))
                return;
 
-       pa_data = boot_params.hdr.setup_data;
-       while (pa_data) {
-               data = early_ioremap(pa_data, sizeof(*efi_var_data));
-               if (data->type == SETUP_EFI_VARS) {
-                       efi_var_data = (struct efi_var_bootdata *)data;
-
-                       efi_var_store_size = efi_var_data->store_size;
-                       efi_var_remaining_size = efi_var_data->remaining_size;
-                       efi_var_max_var_size = efi_var_data->max_var_size;
-               }
-               pa_data = data->next;
-               early_iounmap(data, sizeof(*efi_var_data));
-       }
-
-       boot_used_size = efi_var_store_size - efi_var_remaining_size;
-
        set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
        /*
@@ -1085,6 +988,13 @@ void __init efi_enter_virtual_mode(void)
                runtime_code_page_mkexec();
 
        kfree(new_memmap);
+
+       /* clean DUMMY object */
+       efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+                        EFI_VARIABLE_NON_VOLATILE |
+                        EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                        EFI_VARIABLE_RUNTIME_ACCESS,
+                        0, NULL);
 }
 
 /*
@@ -1136,33 +1046,65 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
        efi_status_t status;
        u64 storage_size, remaining_size, max_size;
 
+       if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
+               return 0;
+
        status = efi.query_variable_info(attributes, &storage_size,
                                         &remaining_size, &max_size);
        if (status != EFI_SUCCESS)
                return status;
 
-       if (!max_size && remaining_size > size)
-               printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
-                           " is returning MaxVariableSize=0\n");
        /*
         * Some firmware implementations refuse to boot if there's insufficient
         * space in the variable store. We account for that by refusing the
         * write if permitting it would reduce the available space to under
-        * 50%. However, some firmware won't reclaim variable space until
-        * after the used (not merely the actively used) space drops below
-        * a threshold. We can approximate that case with the value calculated
-        * above. If both the firmware and our calculations indicate that the
-        * available space would drop below 50%, refuse the write.
+        * 5KB. This figure was provided by Samsung, so should be safe.
         */
+       if ((remaining_size - size < EFI_MIN_RESERVE) &&
+               !efi_no_storage_paranoia) {
+
+               /*
+                * Triggering garbage collection may require that the firmware
+                * generate a real EFI_OUT_OF_RESOURCES error. We can force
+                * that by attempting to use more space than is available.
+                */
+               unsigned long dummy_size = remaining_size + 1024;
+               void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+
+               status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+                                         EFI_VARIABLE_NON_VOLATILE |
+                                         EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                                         EFI_VARIABLE_RUNTIME_ACCESS,
+                                         dummy_size, dummy);
+
+               if (status == EFI_SUCCESS) {
+                       /*
+                        * This should have failed, so if it didn't make sure
+                        * that we delete it...
+                        */
+                       efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+                                        EFI_VARIABLE_NON_VOLATILE |
+                                        EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                                        EFI_VARIABLE_RUNTIME_ACCESS,
+                                        0, dummy);
+               }
 
-       if (!storage_size || size > remaining_size ||
-           (max_size && size > max_size))
-               return EFI_OUT_OF_RESOURCES;
+               /*
+                * The runtime code may now have triggered a garbage collection
+                * run, so check the variable info again
+                */
+               status = efi.query_variable_info(attributes, &storage_size,
+                                                &remaining_size, &max_size);
 
-       if (!efi_no_storage_paranoia &&
-           ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
-            (remaining_size - size < storage_size / 2)))
-               return EFI_OUT_OF_RESOURCES;
+               if (status != EFI_SUCCESS)
+                       return status;
+
+               /*
+                * There still isn't enough room, so return an error
+                */
+               if (remaining_size - size < EFI_MIN_RESERVE)
+                       return EFI_OUT_OF_RESOURCES;
+       }
 
        return EFI_SUCCESS;
 }
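
The rewritten check keeps a fixed 5 KiB (EFI_MIN_RESERVE) of the variable store free rather than reasoning about 50% thresholds. If a write would dip below that reserve, the kernel first provokes the firmware's garbage collection by attempting an oversized dummy SetVariable call that should fail with EFI_OUT_OF_RESOURCES (deleting the variable if it unexpectedly succeeds), then re-queries the store. With made-up numbers, the trigger condition is simply:

#include <stdio.h>

#define EFI_MIN_RESERVE 5120	/* bytes, as in the patch */

int main(void)
{
	/* hypothetical firmware report and write request */
	unsigned long remaining_size = 6000, size = 1200;

	if (remaining_size - size < EFI_MIN_RESERVE)
		puts("below the 5KB reserve: poke firmware garbage collection first");
	else
		puts("enough headroom: allow the write");
	return 0;
}
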
index 590be10908925b1d26e8b8bb1ffc3c03925ac159..f7bab68a4b83e4094db9b70c50292fddd88098c1 100644 (file)
@@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        "^(xen_irq_disable_direct_reloc$|"
        "xen_save_fl_direct_reloc$|"
        "VDSO|"
-#if ELF_BITS == 64
-       "__vvar_page|"
-#endif
        "__crc_)",
 
 /*
@@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        "__per_cpu_load|"
        "init_per_cpu__.*|"
        "__end_rodata_hpage_align|"
+       "__vvar_page|"
 #endif
        "_end)$"
 };
index 8ff37995d54e92518e3da0d7400e1dc661ddf0a7..d99cae8147d1243b84faa111bcd74b447e5ff7ec 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/irq_work.h>
+#include <linux/tick.h>
 
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
+       /*
+        * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
+        * clears certain data that the cpu_idle loop (which called us
+        * and that we return from) expects. The only way to get that
+        * data back is to call:
+        */
+       tick_nohz_idle_enter();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
@@ -576,24 +584,22 @@ void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
 {
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
+       int xen_vector = xen_map_vector(vector);
 
-       if (!(num_online_cpus() > 1))
+       if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;
 
        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;
 
-               xen_smp_send_call_function_single_ipi(cpu);
+               xen_send_IPI_one(cpu, xen_vector);
        }
 }
 
 void xen_send_IPI_allbutself(int vector)
 {
-       int xen_vector = xen_map_vector(vector);
-
-       if (xen_vector >= 0)
-               xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+       xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
index 8981a76d081a0c0cff16f89e99b1dc2b5e53bc98..c7c2d89efd76ac3c7627168b0a3d2f3964627f66 100644 (file)
@@ -5,7 +5,6 @@ extern void xen_send_IPI_mask(const struct cpumask *mask,
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                int vector);
 extern void xen_send_IPI_allbutself(int vector);
-extern void physflat_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 
index a8f44f50e651064b2358c7c8ab906a2103d38acc..b21ace4fc9bab09ef3aa58170a0ac3e0d814dc3d 100644 (file)
@@ -85,4 +85,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* _XTENSA_SOCKET_H */
index 33c33bc99ddd5546e6ba30ce267cb436d0328c51..d5745b5833c9d76527809c41b4718c3df70407df 100644 (file)
@@ -3164,7 +3164,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
                q->rpm_status = RPM_ACTIVE;
                __blk_run_queue(q);
                pm_runtime_mark_last_busy(q->dev);
-               pm_runtime_autosuspend(q->dev);
+               pm_request_autosuspend(q->dev);
        } else {
                q->rpm_status = RPM_SUSPENDED;
        }
index 622d8a48cbe9cd5861701320b7a83ec78c4428c8..bf8148e74e73cbf4fb077a3b82a9301581c34d12 100644 (file)
@@ -823,6 +823,7 @@ config CRYPTO_BLOWFISH_X86_64
 config CRYPTO_BLOWFISH_AVX2_X86_64
        tristate "Blowfish cipher algorithm (x86_64/AVX2)"
        depends on X86 && 64BIT
+       depends on BROKEN
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
        select CRYPTO_ABLK_HELPER_X86
@@ -1299,6 +1300,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
 config CRYPTO_TWOFISH_AVX2_X86_64
        tristate "Twofish cipher algorithm (x86_64/AVX2)"
        depends on X86 && 64BIT
+       depends on BROKEN
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
        select CRYPTO_ABLK_HELPER_X86
index ecb743bf05a54c4541f028f60170f45dcd20bb8b..536562c626a2fab4bb20c128abcf27f4404ca23f 100644 (file)
@@ -24,7 +24,7 @@ acpi-y                                += nvs.o
 # Power management related files
 acpi-y                         += wakeup.o
 acpi-y                         += sleep.o
-acpi-$(CONFIG_PM)              += device_pm.o
+acpi-y                         += device_pm.o
 acpi-$(CONFIG_ACPI_SLEEP)      += proc.o
 
 
@@ -38,7 +38,6 @@ acpi-y                                += processor_core.o
 acpi-y                         += ec.o
 acpi-$(CONFIG_ACPI_DOCK)       += dock.o
 acpi-y                         += pci_root.o pci_link.o pci_irq.o
-acpi-y                         += csrt.o
 acpi-$(CONFIG_X86_INTEL_LPSS)  += acpi_lpss.o
 acpi-y                         += acpi_platform.o
 acpi-y                         += power.o
index 00d2efd674df5b7a2ecca39ac2e161cfcf14b719..4f4e741d34b2c9616808177c31e7b488ee00ed8e 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
 #ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev);
 #endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
+static int ac_sleep_before_get_state_ms;
+
 static struct acpi_driver acpi_ac_driver = {
        .name = "ac",
        .class = ACPI_AC_CLASS,
@@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
        case ACPI_AC_NOTIFY_STATUS:
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
+               /*
+                * A buggy BIOS may notify AC first and then sleep for
+                * a specific time before doing actual operations in the
+                * EC event handler (_Qxx). This will cause the AC state
+                * reported by the ACPI event to be incorrect, so wait for a
+                * specific time for the EC event handler to make progress.
+                */
+               if (ac_sleep_before_get_state_ms > 0)
+                       msleep(ac_sleep_before_get_state_ms);
+
                acpi_ac_get_state(ac);
                acpi_bus_generate_proc_event(device, event, (u32) ac->state);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
@@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
        return;
 }
 
+static int thinkpad_e530_quirk(const struct dmi_system_id *d)
+{
+       ac_sleep_before_get_state_ms = 1000;
+       return 0;
+}
+
+static struct dmi_system_id ac_dmi_table[] = {
+       {
+       .callback = thinkpad_e530_quirk,
+       .ident = "thinkpad e530",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
+               },
+       },
+       {},
+};
+
 static int acpi_ac_add(struct acpi_device *device)
 {
        int result = 0;
@@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device)
                kfree(ac);
        }
 
+       dmi_check_system(ac_dmi_table);
        return result;
 }
 
index b1c95422ce74ab79dd13937fd6a7ad260e1055e5..652fd5ce303c4a9efdbfa3f6d4eb330fba42a5bd 100644 (file)
@@ -35,11 +35,16 @@ ACPI_MODULE_NAME("acpi_lpss");
 
 struct lpss_device_desc {
        bool clk_required;
-       const char *clk_parent;
+       const char *clkdev_name;
        bool ltr_required;
        unsigned int prv_offset;
 };
 
+static struct lpss_device_desc lpss_dma_desc = {
+       .clk_required = true,
+       .clkdev_name = "hclk",
+};
+
 struct lpss_private_data {
        void __iomem *mmio_base;
        resource_size_t mmio_size;
@@ -49,7 +54,6 @@ struct lpss_private_data {
 
 static struct lpss_device_desc lpt_dev_desc = {
        .clk_required = true,
-       .clk_parent = "lpss_clk",
        .prv_offset = 0x800,
        .ltr_required = true,
 };
@@ -60,6 +64,9 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
 };
 
 static const struct acpi_device_id acpi_lpss_device_ids[] = {
+       /* Generic LPSS devices */
+       { "INTL9C60", (unsigned long)&lpss_dma_desc },
+
        /* Lynxpoint LPSS devices */
        { "INT33C0", (unsigned long)&lpt_dev_desc },
        { "INT33C1", (unsigned long)&lpt_dev_desc },
@@ -91,16 +98,27 @@ static int register_device_clock(struct acpi_device *adev,
                                 struct lpss_private_data *pdata)
 {
        const struct lpss_device_desc *dev_desc = pdata->dev_desc;
+       struct lpss_clk_data *clk_data;
 
        if (!lpss_clk_dev)
                lpt_register_clock_device();
 
-       if (!dev_desc->clk_parent || !pdata->mmio_base
+       clk_data = platform_get_drvdata(lpss_clk_dev);
+       if (!clk_data)
+               return -ENODEV;
+
+       if (dev_desc->clkdev_name) {
+               clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
+                                   dev_name(&adev->dev));
+               return 0;
+       }
+
+       if (!pdata->mmio_base
            || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
                return -ENODATA;
 
        pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
-                                      dev_desc->clk_parent, 0,
+                                      clk_data->name, 0,
                                       pdata->mmio_base + dev_desc->prv_offset,
                                       0, 0, NULL);
        if (IS_ERR(pdata->clk))
index fefc2ca7cc3e0199b5f98b707107d424dd40b8eb..33dc6a004802120fff9eb1e0e1e757dd542cc4f8 100644 (file)
@@ -250,10 +250,6 @@ static const char *cper_pcie_port_type_strs[] = {
 static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                            const struct acpi_hest_generic_data *gdata)
 {
-#ifdef CONFIG_ACPI_APEI_PCIEAER
-       struct pci_dev *dev;
-#endif
-
        if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
                printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
                       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -285,20 +281,6 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                printk(
        "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
        pfx, pcie->bridge.secondary_status, pcie->bridge.control);
-#ifdef CONFIG_ACPI_APEI_PCIEAER
-       dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
-                       pcie->device_id.bus, pcie->device_id.function);
-       if (!dev) {
-               pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
-                       pcie->device_id.segment, pcie->device_id.bus,
-                       pcie->device_id.slot, pcie->device_id.function);
-               return;
-       }
-       if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
-               cper_print_aer(pfx, dev, gdata->error_severity,
-                               (struct aer_capability_regs *) pcie->aer_info);
-       pci_dev_put(dev);
-#endif
 }
 
 static const char *apei_estatus_section_flag_strs[] = {
index d668a8ae602bb4533b793911408cf138c047a4ce..fcd7d91cec34af25ed8ccfc32188a24b677c641b 100644 (file)
@@ -454,7 +454,9 @@ static void ghes_do_proc(struct ghes *ghes,
                                aer_severity = cper_severity_to_aer(sev);
                                aer_recover_queue(pcie_err->device_id.segment,
                                                  pcie_err->device_id.bus,
-                                                 devfn, aer_severity);
+                                                 devfn, aer_severity,
+                                                 (struct aer_capability_regs *)
+                                                 pcie_err->aer_info);
                        }
 
                }
@@ -917,13 +919,14 @@ static int ghes_probe(struct platform_device *ghes_dev)
                break;
        case ACPI_HEST_NOTIFY_EXTERNAL:
                /* External interrupt vector is GSI */
-               if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+               rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
+               if (rc) {
                        pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
                               generic->header.source_id);
                        goto err_edac_unreg;
                }
-               if (request_irq(ghes->irq, ghes_irq_func,
-                               0, "GHES IRQ", ghes)) {
+               rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+               if (rc) {
                        pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
                               generic->header.source_id);
                        goto err_edac_unreg;
diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c
deleted file mode 100644 (file)
index 5c15a91..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Support for Core System Resources Table (CSRT)
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- *         Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "ACPI: CSRT: " fmt
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sizes.h>
-
-ACPI_MODULE_NAME("CSRT");
-
-static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
-                                             const struct acpi_csrt_group *grp)
-{
-       const struct acpi_csrt_shared_info *si;
-       struct resource res[3];
-       size_t nres;
-       int ret;
-
-       memset(res, 0, sizeof(res));
-       nres = 0;
-
-       si = (const struct acpi_csrt_shared_info *)&grp[1];
-       /*
-        * The peripherals that are listed on CSRT typically support only
-        * 32-bit addresses so we only use the low part of MMIO base for
-        * now.
-        */
-       if (!si->mmio_base_high && si->mmio_base_low) {
-               /*
-                * There is no size of the memory resource in shared_info
-                * so we assume that it is 4k here.
-                */
-               res[nres].start = si->mmio_base_low;
-               res[nres].end = res[0].start + SZ_4K - 1;
-               res[nres++].flags = IORESOURCE_MEM;
-       }
-
-       if (si->gsi_interrupt) {
-               int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
-                                           si->interrupt_mode,
-                                           si->interrupt_polarity);
-               res[nres].start = irq;
-               res[nres].end = irq;
-               res[nres++].flags = IORESOURCE_IRQ;
-       }
-
-       if (si->base_request_line || si->num_handshake_signals) {
-               /*
-                * We pass the driver a DMA resource describing the range
-                * of request lines the device supports.
-                */
-               res[nres].start = si->base_request_line;
-               res[nres].end = res[nres].start + si->num_handshake_signals - 1;
-               res[nres++].flags = IORESOURCE_DMA;
-       }
-
-       ret = platform_device_add_resources(pdev, res, nres);
-       if (ret) {
-               if (si->gsi_interrupt)
-                       acpi_unregister_gsi(si->gsi_interrupt);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int __init
-acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
-{
-       struct platform_device *pdev;
-       char vendor[5], name[16];
-       int ret, i;
-
-       vendor[0] = grp->vendor_id;
-       vendor[1] = grp->vendor_id >> 8;
-       vendor[2] = grp->vendor_id >> 16;
-       vendor[3] = grp->vendor_id >> 24;
-       vendor[4] = '\0';
-
-       if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
-               return -ENODEV;
-
-       snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
-       pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
-       if (!pdev)
-               return -ENOMEM;
-
-       /* Add resources based on the shared info */
-       ret = acpi_csrt_parse_shared_info(pdev, grp);
-       if (ret)
-               goto fail;
-
-       ret = platform_device_add(pdev);
-       if (ret)
-               goto fail;
-
-       for (i = 0; i < pdev->num_resources; i++)
-               dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
-
-       return 0;
-
-fail:
-       platform_device_put(pdev);
-       return ret;
-}
-
-/*
- * CSRT or Core System Resources Table is a proprietary ACPI table
- * introduced by Microsoft. This table can contain devices that are not in
- * the system DSDT table. In particular DMA controllers might be described
- * here.
- *
- * We present these devices as normal platform devices that don't have ACPI
- * IDs or handle. The platform device name will be something like
- * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
- */
-void __init acpi_csrt_init(void)
-{
-       struct acpi_csrt_group *grp, *end;
-       struct acpi_table_csrt *csrt;
-       acpi_status status;
-       int ret;
-
-       status = acpi_get_table(ACPI_SIG_CSRT, 0,
-                               (struct acpi_table_header **)&csrt);
-       if (ACPI_FAILURE(status)) {
-               if (status != AE_NOT_FOUND)
-                       pr_warn("failed to get the CSRT table\n");
-               return;
-       }
-
-       pr_debug("parsing CSRT table for devices\n");
-
-       grp = (struct acpi_csrt_group *)(csrt + 1);
-       end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
-
-       while (grp < end) {
-               ret = acpi_csrt_parse_resource_group(grp);
-               if (ret) {
-                       pr_warn("error in parsing resource group: %d\n", ret);
-                       return;
-               }
-
-               grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
-       }
-}
index 96de787e6104d5509b024d27d4ad9b32527a1d7b..318fa32a141ec41c70c273b1f57f966f38fd55dc 100644 (file)
 #define _COMPONENT     ACPI_POWER_COMPONENT
 ACPI_MODULE_NAME("device_pm");
 
-static DEFINE_MUTEX(acpi_pm_notifier_lock);
-
-/**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
- *
- * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
- * PM wakeup events.  For example, wakeup events may be generated for bridges
- * if one of the devices below the bridge is signaling wakeup, even if the
- * bridge itself doesn't have a wakeup GPE associated with it.
- */
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
-                                acpi_notify_handler handler, void *context)
-{
-       acpi_status status = AE_ALREADY_EXISTS;
-
-       mutex_lock(&acpi_pm_notifier_lock);
-
-       if (adev->wakeup.flags.notifier_present)
-               goto out;
-
-       status = acpi_install_notify_handler(adev->handle,
-                                            ACPI_SYSTEM_NOTIFY,
-                                            handler, context);
-       if (ACPI_FAILURE(status))
-               goto out;
-
-       adev->wakeup.flags.notifier_present = true;
-
- out:
-       mutex_unlock(&acpi_pm_notifier_lock);
-       return status;
-}
-
-/**
- * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
- * @adev: ACPI device to remove the notifier from.
- */
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
-                                   acpi_notify_handler handler)
-{
-       acpi_status status = AE_BAD_PARAMETER;
-
-       mutex_lock(&acpi_pm_notifier_lock);
-
-       if (!adev->wakeup.flags.notifier_present)
-               goto out;
-
-       status = acpi_remove_notify_handler(adev->handle,
-                                           ACPI_SYSTEM_NOTIFY,
-                                           handler);
-       if (ACPI_FAILURE(status))
-               goto out;
-
-       adev->wakeup.flags.notifier_present = false;
-
- out:
-       mutex_unlock(&acpi_pm_notifier_lock);
-       return status;
-}
-
 /**
  * acpi_power_state_string - String representation of ACPI device power state.
  * @state: ACPI device power state to return the string representation of.
@@ -340,11 +278,13 @@ int acpi_bus_init_power(struct acpi_device *device)
                if (result)
                        return result;
        } else if (state == ACPI_STATE_UNKNOWN) {
-               /* No power resources and missing _PSC? Try to force D0. */
+               /*
+                * No power resources and missing _PSC?  Cross fingers and make
+                * it D0 in hope that this is what the BIOS put the device into.
+                * [We tried to force D0 here by executing _PS0, but that broke
+                * Toshiba P870-303 in a nasty way.]
+                */
                state = ACPI_STATE_D0;
-               result = acpi_dev_pm_explicit_set(device, state);
-               if (result)
-                       return result;
        }
        device->power.state = state;
        return 0;
@@ -385,6 +325,69 @@ bool acpi_bus_power_manageable(acpi_handle handle)
 }
 EXPORT_SYMBOL(acpi_bus_power_manageable);
 
+#ifdef CONFIG_PM
+static DEFINE_MUTEX(acpi_pm_notifier_lock);
+
+/**
+ * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
+ * @adev: ACPI device to add the notifier for.
+ * @context: Context information to pass to the notifier routine.
+ *
+ * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
+ * PM wakeup events.  For example, wakeup events may be generated for bridges
+ * if one of the devices below the bridge is signaling wakeup, even if the
+ * bridge itself doesn't have a wakeup GPE associated with it.
+ */
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
+                                acpi_notify_handler handler, void *context)
+{
+       acpi_status status = AE_ALREADY_EXISTS;
+
+       mutex_lock(&acpi_pm_notifier_lock);
+
+       if (adev->wakeup.flags.notifier_present)
+               goto out;
+
+       status = acpi_install_notify_handler(adev->handle,
+                                            ACPI_SYSTEM_NOTIFY,
+                                            handler, context);
+       if (ACPI_FAILURE(status))
+               goto out;
+
+       adev->wakeup.flags.notifier_present = true;
+
+ out:
+       mutex_unlock(&acpi_pm_notifier_lock);
+       return status;
+}
+
+/**
+ * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @adev: ACPI device to remove the notifier from.
+ */
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
+                                   acpi_notify_handler handler)
+{
+       acpi_status status = AE_BAD_PARAMETER;
+
+       mutex_lock(&acpi_pm_notifier_lock);
+
+       if (!adev->wakeup.flags.notifier_present)
+               goto out;
+
+       status = acpi_remove_notify_handler(adev->handle,
+                                           ACPI_SYSTEM_NOTIFY,
+                                           handler);
+       if (ACPI_FAILURE(status))
+               goto out;
+
+       adev->wakeup.flags.notifier_present = false;
+
+ out:
+       mutex_unlock(&acpi_pm_notifier_lock);
+       return status;
+}
+
 bool acpi_bus_can_wakeup(acpi_handle handle)
 {
        struct acpi_device *device;
@@ -1023,3 +1026,4 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
        mutex_unlock(&adev->physical_node_lock);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
+#endif /* CONFIG_PM */
index d45b2871d33b12fa2453f4f91da1a1d55bf19edc..edc00818c80321086ff3938f5df3cf2afae04d61 100644 (file)
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 static int ec_poll(struct acpi_ec *ec)
 {
        unsigned long flags;
-       int repeat = 2; /* number of command restarts */
+       int repeat = 5; /* number of command restarts */
        while (repeat--) {
                unsigned long delay = jiffies +
                        msecs_to_jiffies(ec_delay);
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
                        }
                        advance_transaction(ec, acpi_ec_read_status(ec));
                } while (time_before(jiffies, delay));
-               if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
-                       break;
                pr_debug(PREFIX "controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
                start_transaction(ec);
index 6f1afd9118c806f728ce161033e932f3dcfc485f..297cbf456f86bee35300ef6c72058615663f563f 100644 (file)
@@ -35,7 +35,6 @@ void acpi_pci_link_init(void);
 void acpi_pci_root_hp_init(void);
 void acpi_platform_init(void);
 int acpi_sysfs_init(void);
-void acpi_csrt_init(void);
 #ifdef CONFIG_ACPI_CONTAINER
 void acpi_container_init(void);
 #else
index 1dd6f6c8587442ab1fcbdf7d7bd78d98ef551a9b..e427dc516c76d1d8d73b5a91c7d4f1f73b83cfbf 100644 (file)
@@ -641,7 +641,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
                /* bus enumerate */
                printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__,
                                 (char *)buffer.pointer);
-               if (!root)
+               if (root)
+                       acpiphp_check_host_bridge(handle);
+               else
                        handle_root_bridge_insertion(handle);
 
                break;
index bec717ffd25f5ff7c016f57365a380dc8e91b7fb..c266cdc117840bcdfc7909c2605840501b2c87c2 100644 (file)
@@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
 
-static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
-                        acpi_processor_suspend, acpi_processor_resume);
-
 static struct acpi_driver acpi_processor_driver = {
        .name = "processor",
        .class = ACPI_PROCESSOR_CLASS,
@@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = {
                .remove = acpi_processor_remove,
                .notify = acpi_processor_notify,
                },
-       .drv.pm = &acpi_processor_pm,
 };
 
 #define INSTALL_NOTIFY_HANDLER         1
@@ -934,6 +930,8 @@ static int __init acpi_processor_init(void)
        if (result < 0)
                return result;
 
+       acpi_processor_syscore_init();
+
        acpi_processor_install_hotplug_notify();
 
        acpi_thermal_cpufreq_init();
@@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void)
 
        acpi_processor_uninstall_hotplug_notify();
 
+       acpi_processor_syscore_exit();
+
        acpi_bus_unregister_driver(&acpi_processor_driver);
 
        return;
index f0df2c9434d2ee14809db94e9150b48de0878a7d..eb133c77aadb5f38a7d60824958fa02457c4c67b 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
+#include <linux/syscore_ops.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static u32 saved_bm_rld;
 
-static void acpi_idle_bm_rld_save(void)
+int acpi_processor_suspend(void)
 {
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+       return 0;
 }
-static void acpi_idle_bm_rld_restore(void)
+
+void acpi_processor_resume(void)
 {
        u32 resumed_bm_rld;
 
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+       if (resumed_bm_rld == saved_bm_rld)
+               return;
 
-       if (resumed_bm_rld != saved_bm_rld)
-               acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+       acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
 }
 
-int acpi_processor_suspend(struct device *dev)
+static struct syscore_ops acpi_processor_syscore_ops = {
+       .suspend = acpi_processor_suspend,
+       .resume = acpi_processor_resume,
+};
+
+void acpi_processor_syscore_init(void)
 {
-       acpi_idle_bm_rld_save();
-       return 0;
+       register_syscore_ops(&acpi_processor_syscore_ops);
 }
 
-int acpi_processor_resume(struct device *dev)
+void acpi_processor_syscore_exit(void)
 {
-       acpi_idle_bm_rld_restore();
-       return 0;
+       unregister_syscore_ops(&acpi_processor_syscore_ops);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 #if defined(CONFIG_X86)
 static void tsc_check_state(int state)
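
The processor_idle.c hunk above moves the BM_RLD save/restore from driver PM callbacks to syscore ops; syscore suspend/resume callbacks run with a single CPU online and interrupts disabled, late in suspend and early in resume. A tiny userspace analogue of the save-then-restore-only-if-changed idiom; demo_suspend(), demo_resume() and device_reg are illustrative names, not kernel APIs:

#include <stdint.h>
#include <stdio.h>

static uint32_t device_reg = 0x1;       /* stands in for the BM_RLD bit */
static uint32_t saved_reg;

static int demo_suspend(void)
{
        saved_reg = device_reg;         /* snapshot before sleeping */
        return 0;
}

static void demo_resume(void)
{
        /* Firmware may have changed the value across the sleep state;
         * write it back only if it actually differs. */
        if (device_reg != saved_reg)
                device_reg = saved_reg;
}

int main(void)
{
        demo_suspend();
        device_reg = 0;                 /* pretend firmware clobbered it */
        demo_resume();
        printf("restored to 0x%x\n", (unsigned)device_reg);
        return 0;
}
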
index fe158fd4f1df401d74c04c612cca591f4a7d8eaf..b14ac46948c9e444246eba50e367ed1a16399ea2 100644 (file)
@@ -1017,11 +1017,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
                return -ENOSYS;
 
        result = driver->ops.add(device);
-       if (result) {
-               device->driver = NULL;
-               device->driver_data = NULL;
+       if (result)
                return result;
-       }
 
        device->driver = driver;
 
@@ -1785,7 +1782,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
        acpi_set_pnp_ids(handle, &pnp, type);
 
        if (!pnp.type.hardware_id)
-               return;
+               goto out;
 
        /*
         * This relies on the fact that acpi_install_notify_handler() will not
@@ -1800,6 +1797,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
                }
        }
 
+out:
        acpi_free_pnp_ids(&pnp);
 }
 
@@ -2042,7 +2040,6 @@ int __init acpi_scan_init(void)
        acpi_pci_link_init();
        acpi_platform_init();
        acpi_lpss_init();
-       acpi_csrt_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
 
index c3932d0876e0059c4a7c7aee7342b7123388e27c..440eadf2d32cdd270a75e9023f9ef31a1d9ae794 100644 (file)
@@ -456,6 +456,30 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
                },
        },
+       {
+        .callback = video_ignore_initial_backlight,
+        .ident = "HP Pavilion g6 Notebook PC",
+        .matches = {
+                DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+               },
+       },
+       {
+        .callback = video_ignore_initial_backlight,
+        .ident = "HP 1000 Notebook PC",
+        .matches = {
+               DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
+               },
+       },
+       {
+        .callback = video_ignore_initial_backlight,
+        .ident = "HP Pavilion m4",
+        .matches = {
+               DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
+               },
+       },
        {}
 };
 
@@ -1698,6 +1722,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
        int error;
        acpi_status status;
 
+       if (device->handler)
+               return -EINVAL;
+
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
                                device->parent->handle, 1,
                                acpi_video_bus_match, NULL,
index 66f67626f02ec42c5324085fc0a026f2ffca783d..e6bd910bc6edee4b55e3b6de7888bdd5c5ec06f6 100644 (file)
@@ -161,6 +161,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
                },
        },
+       {
+       .callback = video_detect_force_vendor,
+       .ident = "Asus UL30A",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+               },
+       },
        { },
 };
 
index 4e94ba29cb8d321eae212fa27ee1e72658801c34..9d0cf019ce59a5034984ced1eb113f77b94206ff 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *  acard-ahci.c - ACard AHCI SATA support
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 251e57d38942cdec01790d5fa02cb2c7aed87535..2b50dfdf1cfc3ea3a6e468e3776e64958fd38e5e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  ahci.c - AHCI SATA support
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
@@ -423,6 +423,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          .driver_data = board_ahci_yes_fbs },                  /* 88se9125 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
index b830e6c9fe49f385b77405c188b100a7afcc0570..10b14d45cfd2f8d2d9bb712a75e7787cdf67bdc1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  ahci.h - Common AHCI SATA definitions and declarations
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 2f48123d74c4c85d84180e3e7a39c712a1b7a175..9a8a674e8fac9df8ac2c6d08515ce88e6b213fc8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *    ata_piix.c - Intel PATA/SATA controllers
  *
- *    Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *    Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
@@ -151,6 +151,7 @@ enum piix_controller_ids {
        piix_pata_vmw,                  /* PIIX4 for VMware, spurious DMA_ERR */
        ich8_sata_snb,
        ich8_2port_sata_snb,
+       ich8_2port_sata_byt,
 };
 
 struct piix_map_db {
@@ -334,6 +335,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (Wellsburg) */
        { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (BayTrail) */
+       { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+       { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
 
        { }     /* terminate list */
 };
@@ -441,6 +445,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
        [tolapai_sata]          = &tolapai_map_db,
        [ich8_sata_snb]         = &ich8_map_db,
        [ich8_2port_sata_snb]   = &ich8_2port_map_db,
+       [ich8_2port_sata_byt]   = &ich8_2port_map_db,
 };
 
 static struct pci_bits piix_enable_bits[] = {
@@ -1254,6 +1259,16 @@ static struct ata_port_info piix_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &piix_sata_ops,
        },
+
+       [ich8_2port_sata_byt] =
+       {
+               .flags          = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &piix_sata_ops,
+       },
+
 };
 
 #define AHCI_PCI_BAR 5
index 34c82167b9625f41666125685148b68774036fd9..a70ff154f586753d6c096b858ae5dbee0ad869c3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libahci.c - Common AHCI SATA low-level routines
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 63c743baf920217181bdb6fcd7cde0f3dc61d0c6..f2184276539d885d167c2048a305847b51e58417 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-core.c - helper library for ATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
@@ -1602,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+
+       /* some SATA bridges need us to indicate data xfer direction */
+       if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
+           dma_dir == DMA_FROM_DEVICE)
+               qc->tf.feature |= ATAPI_DMADIR;
+
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
index f9476fb3ac43b44596c1134fe8fea55bffdd5f71..c69fcce505c03d06c7b20ae333ff9708a228f869 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-eh.c - libata error handling
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index dd310b27b24c3dddcf7c4c7e6d4b3b042ee4dc53..0101af541436f8076b056c237fd2dc9cc8e4dce6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-scsi.c - helper library for ATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index d8af325a6bda8cc8f76dff8a69b9d746d5c9c3a6..b603720b877dd0344478ddecf234bb14f77975fa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-sff.c - helper library for PCI IDE BMDMA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index c1bfaf43d109119e5f47d709365f91c5c170c656..980b88e109fcf5109e164d155dfca86de735c2c2 100644 (file)
@@ -933,11 +933,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
        }
 
        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem_res) {
-               err = -ENXIO;
-               goto err_rel_gpio;
-       }
-
        ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
        if (IS_ERR(ide_base)) {
                err = PTR_ERR(ide_base);
index 505333340ad517ecc54e924463ab13e364534977..8ea6e6afd041db7736e6a69c88641253fa4b4078 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  pdc_adma.c - Pacific Digital Corporation ADMA
  *
- *  Maintained by:  Mark Lord <mlord@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *
  *  Copyright 2005 Mark Lord
  *
index fb0dd87f889378a952a339e048faa3629177779d..958ba2a420c34b0e5411b116d639214e700f5c47 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_promise.c - Promise SATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Mikael Pettersson <mikpe@it.uu.se>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
index 4799868bd7339c40ad863a317e92e2d0ddfffe9a..249c8a289bfd5dbb97f25fbcbd99ff241ac40c21 100644 (file)
@@ -549,6 +549,7 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
 
        /* start host DMA transaction */
        dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+       dmactl &= ~ATAPI_CONTROL1_STOP;
        dmactl |= ATAPI_CONTROL1_START;
        iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
 }
@@ -618,17 +619,16 @@ static struct ata_port_operations sata_rcar_port_ops = {
        .bmdma_status           = sata_rcar_bmdma_status,
 };
 
-static int sata_rcar_serr_interrupt(struct ata_port *ap)
+static void sata_rcar_serr_interrupt(struct ata_port *ap)
 {
        struct sata_rcar_priv *priv = ap->host->private_data;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        int freeze = 0;
-       int handled = 0;
        u32 serror;
 
        serror = ioread32(priv->base + SCRSERR_REG);
        if (!serror)
-               return 0;
+               return;
 
        DPRINTK("SError @host_intr: 0x%x\n", serror);
 
@@ -641,7 +641,6 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
                ata_ehi_push_desc(ehi, "%s", "hotplug");
 
                freeze = serror & SERR_COMM_WAKE ? 0 : 1;
-               handled = 1;
        }
 
        /* freeze or abort */
@@ -649,11 +648,9 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
-
-       return handled;
 }
 
-static int sata_rcar_ata_interrupt(struct ata_port *ap)
+static void sata_rcar_ata_interrupt(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc;
        int handled = 0;
@@ -662,7 +659,9 @@ static int sata_rcar_ata_interrupt(struct ata_port *ap)
        if (qc)
                handled |= ata_bmdma_port_intr(ap, qc);
 
-       return handled;
+       /* be sure to clear ATA interrupt */
+       if (!handled)
+               sata_rcar_check_status(ap);
 }
 
 static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
@@ -677,20 +676,21 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
        spin_lock_irqsave(&host->lock, flags);
 
        sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+       sataintstat &= SATA_RCAR_INT_MASK;
        if (!sataintstat)
                goto done;
        /* ack */
-       iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
-                priv->base + SATAINTSTAT_REG);
+       iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG);
 
        ap = host->ports[0];
 
        if (sataintstat & SATAINTSTAT_ATA)
-               handled |= sata_rcar_ata_interrupt(ap);
+               sata_rcar_ata_interrupt(ap);
 
        if (sataintstat & SATAINTSTAT_SERR)
-               handled |= sata_rcar_serr_interrupt(ap);
+               sata_rcar_serr_interrupt(ap);
 
+       handled = 1;
 done:
        spin_unlock_irqrestore(&host->lock, flags);
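
In the sata_rcar interrupt handler above, the status is masked to the bits the driver owns (SATA_RCAR_INT_MASK) before anything else, the handler returns early when nothing relevant is pending, and the acknowledge write is ~sataintstat & 0x7ff, the usual shape for a status register whose bits are cleared by writing 0. A toy model of that behaviour, under the write-0-to-clear assumption; INT_MASK, STAT_BITS, stat_reg and stat_write() are illustrative, not the hardware's real layout:

#include <stdint.h>
#include <stdio.h>

#define INT_MASK        0x0003u         /* interrupts this driver handles (made up) */
#define STAT_BITS       0x07ffu         /* width of the writable status field (made up) */

static uint32_t stat_reg = 0x0401;      /* fake device status register */

/* Write-0-to-clear: bits written as 0 are acknowledged, bits written as 1 stay set. */
static void stat_write(uint32_t val)
{
        stat_reg &= val | ~STAT_BITS;
}

int main(void)
{
        uint32_t pending = stat_reg & INT_MASK; /* only consider bits we own */

        if (!pending)
                return 0;                       /* nothing for us: would be IRQ_NONE */

        stat_write(~pending & STAT_BITS);       /* ack exactly what will be serviced */
        printf("serviced 0x%x, still pending 0x%x\n",
               (unsigned)pending, (unsigned)stat_reg);
        return 0;
}
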
 
index a7b31672c4b7fd88fb1e999308d531e17b1e8b49..0ae3ca4bf5c0a063077885f18e7375826a696c24 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_sil.c - Silicon Image SATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 7b7127a58f51de8b848086425a50ce1b78560474..9947010afc0f7973e2af77664f6ef39e53840358 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_sx4.c - Promise SATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 5913ea9d57b211959ed30b0850c1b7f8e1ab7f64..87f056e54a9d7566504925ca812347baf6b3e62f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_via.c - VIA Serial ATA controllers
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                Please ALWAYS copy linux-ide@vger.kernel.org
  *                on emails.
  *
index 77a7480dc4d1bc23881c0b6d4a1e469833576424..62a76076b5482a3bacab4a9cf2a5ff2c269efddb 100644 (file)
@@ -1403,7 +1403,7 @@ static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
   rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
   
   skb->data = skb->head;
-  skb->tail = skb->head;
+  skb_reset_tail_pointer(skb);
   skb->len = 0;
   
   if (!rx_give (dev, &rx, pool)) {
index 1a68f947ded86c45e546c837005704a1ed45f293..d414331b480e72afc9ac8ac505dee449f1c795d9 100644 (file)
@@ -1295,6 +1295,7 @@ int subsys_virtual_register(struct bus_type *subsys,
 
        return subsys_register(subsys, groups, virtual_dir);
 }
+EXPORT_SYMBOL_GPL(subsys_virtual_register);
 
 int __init buses_init(void)
 {
index 016312437577ca3378706560a77068dcda67de81..2499cefdcdf2429d9503506d5514a4943f9ec4e1 100644 (file)
@@ -572,9 +572,11 @@ int device_create_file(struct device *dev,
 
        if (dev) {
                WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
-                               "Write permission without 'store'\n");
+                       "Attribute %s: write permission without 'store'\n",
+                       attr->attr.name);
                WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
-                               "Read permission without 'show'\n");
+                       "Attribute %s: read permission without 'show'\n",
+                       attr->attr.name);
                error = sysfs_create_file(&dev->kobj, &attr->attr);
        }
 
index 39c32529b83374c36eda18188c393930732ad99f..5da914041305907d30f26289f12643b5c581be07 100644 (file)
@@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 int dev_pm_put_subsys_data(struct device *dev)
 {
        struct pm_subsys_data *psd;
-       int ret = 0;
+       int ret = 1;
 
        spin_lock_irq(&dev->power.lock);
 
        psd = dev_to_psd(dev);
-       if (!psd) {
-               ret = -EINVAL;
+       if (!psd)
                goto out;
-       }
 
        if (--psd->refcount == 0) {
                dev->power.subsys_data = NULL;
-               kfree(psd);
-               ret = 1;
+       } else {
+               psd = NULL;
+               ret = 0;
        }
 
  out:
        spin_unlock_irq(&dev->power.lock);
+       kfree(psd);
 
        return ret;
 }
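
dev_pm_put_subsys_data() above now returns 1 when no subsystem data remains (either none was attached or the last reference was just dropped and the object freed) and 0 when other references keep it alive, and the kfree() moves out from under the spinlock, relying on kfree(NULL) being a no-op for the still-referenced case. A userspace sketch with the same shape, assuming pthreads; struct subsys_data, psd_ptr and put_subsys_data() are illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct subsys_data {
        int refcount;
        /* ...subsystem-specific state... */
};

static pthread_mutex_t power_lock = PTHREAD_MUTEX_INITIALIZER;
static struct subsys_data *psd_ptr;

/* Returns 1 when no data remains (absent, or last reference dropped and freed),
 * 0 when other references still keep the object alive. */
int put_subsys_data(void)
{
        struct subsys_data *to_free = NULL;
        int ret = 1;

        pthread_mutex_lock(&power_lock);
        if (psd_ptr) {
                if (--psd_ptr->refcount == 0) {
                        to_free = psd_ptr;      /* last user: detach and free below */
                        psd_ptr = NULL;
                } else {
                        ret = 0;                /* still referenced elsewhere */
                }
        }
        pthread_mutex_unlock(&power_lock);

        free(to_free);  /* free(NULL) is a no-op; freeing happens outside the lock */
        return ret;
}
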
index aa0875f6f1b7f3b9ec6b9b89963ed100ecb0ac51..02f490bad30f791f627db78584ca75b691bc6eb7 100644 (file)
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
        int registers = 0;
        int this_registers, average;
 
-       map->lock(map);
+       map->lock(map->lock_arg);
 
        mem_size = sizeof(*rbtree_ctx);
        mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);
 
-       map->unlock(map);
+       map->unlock(map->lock_arg);
 
        return 0;
 }
@@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);
 
-               if (rbnode->base_reg < min)
-                       continue;
                if (rbnode->base_reg > max)
                        break;
                if (rbnode->base_reg + rbnode->blklen < min)
index 75923f2396bd4f7b01727bae6cbedb4239b4ec18..507ee2da0f6ee9b6455ab967f2dcf76a49c57b02 100644 (file)
@@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
 
        BUG_ON(!map->cache_ops || !map->cache_ops->sync);
 
-       map->lock(map);
+       map->lock(map->lock_arg);
        /* Remember the initial bypass state */
        bypass = map->cache_bypass;
        dev_dbg(map->dev, "Syncing %s cache\n",
@@ -306,7 +306,7 @@ out:
        trace_regcache_sync(map->dev, name, "stop");
        /* Restore the bypass state */
        map->cache_bypass = bypass;
-       map->unlock(map);
+       map->unlock(map->lock_arg);
 
        return ret;
 }
@@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 
        BUG_ON(!map->cache_ops || !map->cache_ops->sync);
 
-       map->lock(map);
+       map->lock(map->lock_arg);
 
        /* Remember the initial bypass state */
        bypass = map->cache_bypass;
@@ -352,7 +352,7 @@ out:
        trace_regcache_sync(map->dev, name, "stop region");
        /* Restore the bypass state */
        map->cache_bypass = bypass;
-       map->unlock(map);
+       map->unlock(map->lock_arg);
 
        return ret;
 }
@@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
  */
 void regcache_cache_only(struct regmap *map, bool enable)
 {
-       map->lock(map);
+       map->lock(map->lock_arg);
        WARN_ON(map->cache_bypass && enable);
        map->cache_only = enable;
        trace_regmap_cache_only(map->dev, enable);
-       map->unlock(map);
+       map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
 
@@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
  */
 void regcache_mark_dirty(struct regmap *map)
 {
-       map->lock(map);
+       map->lock(map->lock_arg);
        map->cache_dirty = true;
-       map->unlock(map);
+       map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
 
@@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
  */
 void regcache_cache_bypass(struct regmap *map, bool enable)
 {
-       map->lock(map);
+       map->lock(map->lock_arg);
        WARN_ON(map->cache_only && enable);
        map->cache_bypass = enable;
        trace_regmap_cache_bypass(map->dev, enable);
-       map->unlock(map);
+       map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
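
The repeated conversions from map->lock(map)/map->unlock(map) to map->lock(map->lock_arg)/map->unlock(map->lock_arg) above reflect that regmap's locking callbacks take an opaque context pointer rather than the map itself, so user-supplied lock implementations are handed their own data. A compact sketch of that callback-with-context shape, assuming pthreads; struct map, mark_dirty() and the callback names are illustrative, not the regmap API:

#include <pthread.h>

struct map {
        void (*lock)(void *ctx);
        void (*unlock)(void *ctx);
        void *lock_arg;                 /* passed back to the callbacks verbatim */
        int cache_dirty;
};

static void mutex_lock_cb(void *ctx)   { pthread_mutex_lock(ctx); }
static void mutex_unlock_cb(void *ctx) { pthread_mutex_unlock(ctx); }

static void mark_dirty(struct map *m)
{
        m->lock(m->lock_arg);   /* not m->lock(m): the callback expects its own context */
        m->cache_dirty = 1;
        m->unlock(m->lock_arg);
}

int main(void)
{
        static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
        struct map m = {
                .lock = mutex_lock_cb,
                .unlock = mutex_unlock_cb,
                .lock_arg = &mtx,
        };

        mark_dirty(&m);
        return m.cache_dirty ? 0 : 1;
}
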
 
index 23b701f5fd2f6e9edbe6ad75f94bd760bea3e5fa..975719bc345008a4d396c72b5b1e160c50b8d2a8 100644 (file)
@@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
        char *start = buf;
        unsigned long reg, value;
        struct regmap *map = file->private_data;
+       int ret;
 
        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
@@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
        /* Userspace has been fiddling around behind the kernel's back */
        add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
 
-       regmap_write(map, reg, value);
+       ret = regmap_write(map, reg, value);
+       if (ret < 0)
+               return ret;
        return buf_size;
 }
 #else
index f1a29f8e9d33dbe45474172a24f952f32ce4ba47..9bf4371755f22fa93d4ccbe7d2bb605ec56a4048 100644 (file)
@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 
        spin_lock(&brd->brd_lock);
        idx = sector >> PAGE_SECTORS_SHIFT;
+       page->index = idx;
        if (radix_tree_insert(&brd->brd_pages, idx, page)) {
                __free_page(page);
                page = radix_tree_lookup(&brd->brd_pages, idx);
                BUG_ON(!page);
                BUG_ON(page->index != idx);
-       } else
-               page->index = idx;
+       }
        spin_unlock(&brd->brd_lock);
 
        radix_tree_preload_end();
index 6374dc103521f451863cb988cc68921448eb2ae1..62b6c2cc80b5e9d7ef68a7ff84a447e24bb7ade5 100644 (file)
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
 static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
 static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
-                   unsigned int cmd, unsigned long arg);
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
                       unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
        .owner = THIS_MODULE,
        .open = cciss_unlocked_open,
        .release = cciss_release,
-       .ioctl = do_ioctl,
+       .ioctl = cciss_ioctl,
        .getgeo = cciss_getgeo,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
        mutex_unlock(&cciss_mutex);
 }
 
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
-                   unsigned cmd, unsigned long arg)
-{
-       int ret;
-       mutex_lock(&cciss_mutex);
-       ret = cciss_ioctl(bdev, mode, cmd, arg);
-       mutex_unlock(&cciss_mutex);
-       return ret;
-}
-
 #ifdef CONFIG_COMPAT
 
 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
        case CCISS_REGNEWD:
        case CCISS_RESCANDISK:
        case CCISS_GETLUNINFO:
-               return do_ioctl(bdev, mode, cmd, arg);
+               return cciss_ioctl(bdev, mode, cmd, arg);
 
        case CCISS_PASSTHRU32:
                return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
        if (err)
                return -EFAULT;
 
-       err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
+       err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
        if (err)
                return err;
        err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
        if (err)
                return -EFAULT;
 
-       err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
+       err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
        if (err)
                return err;
        err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
 static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
 {
        cciss_coalint_struct intinfo;
+       unsigned long flags;
 
        if (!argp)
                return -EINVAL;
+       spin_lock_irqsave(&h->lock, flags);
        intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
        intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
+       spin_unlock_irqrestore(&h->lock, flags);
        if (copy_to_user
            (argp, &intinfo, sizeof(cciss_coalint_struct)))
                return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
 static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
 {
        NodeName_type NodeName;
+       unsigned long flags;
        int i;
 
        if (!argp)
                return -EINVAL;
+       spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < 16; i++)
                NodeName[i] = readb(&h->cfgtable->ServerName[i]);
+       spin_unlock_irqrestore(&h->lock, flags);
        if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
                return -EFAULT;
        return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
 static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
 {
        Heartbeat_type heartbeat;
+       unsigned long flags;
 
        if (!argp)
                return -EINVAL;
+       spin_lock_irqsave(&h->lock, flags);
        heartbeat = readl(&h->cfgtable->HeartBeat);
+       spin_unlock_irqrestore(&h->lock, flags);
        if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
                return -EFAULT;
        return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
 static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
 {
        BusTypes_type BusTypes;
+       unsigned long flags;
 
        if (!argp)
                return -EINVAL;
+       spin_lock_irqsave(&h->lock, flags);
        BusTypes = readl(&h->cfgtable->BusTypes);
+       spin_unlock_irqrestore(&h->lock, flags);
        if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
                return -EFAULT;
        return 0;
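
A pattern repeated in the cciss ioctl helpers above: the controller's config-table registers are read into a local snapshot while holding h->lock with interrupts saved, and copy_to_user() runs only after the lock is dropped, since copying to user space can fault and sleep and so must not happen under a spinlock. A userspace-flavoured sketch of snapshot-under-lock, copy-outside-lock; read_hw_word(), get_info() and struct info are illustrative names:

#include <pthread.h>
#include <string.h>

struct info { unsigned int delay; unsigned int count; };

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for readl() on a device register. */
static unsigned int read_hw_word(int which)
{
        return which ? 32u : 1000u;
}

int get_info(struct info *user_buf)
{
        struct info snap;

        pthread_mutex_lock(&hw_lock);           /* read the registers consistently... */
        snap.delay = read_hw_word(0);
        snap.count = read_hw_word(1);
        pthread_mutex_unlock(&hw_lock);         /* ...then drop the lock */

        /* The potentially slow or faulting copy happens outside the lock. */
        memcpy(user_buf, &snap, sizeof(snap));
        return 0;
}
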
index 847107ef0cce52c65c4ff1f1aeaab6a1b4540922..20dd52a2f92f8ea733c4b78d444c0ea94d496dd8 100644 (file)
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
 
 static void mtip_hw_debugfs_exit(struct driver_data *dd)
 {
-       debugfs_remove_recursive(dd->dfs_node);
+       if (dd->dfs_node)
+               debugfs_remove_recursive(dd->dfs_node);
 }
 
 
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
        struct driver_data *dd = queue->queuedata;
        struct scatterlist *sg;
        struct bio_vec *bvec;
-       int nents = 0;
+       int i, nents = 0;
        int tag = 0, unaligned = 0;
 
        if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
                }
 
                /* Create the scatter list for this bio. */
-               bio_for_each_segment(bvec, bio, nents) {
+               bio_for_each_segment(bvec, bio, i) {
                        sg_set_page(&sg[nents],
                                        bvec->bv_page,
                                        bvec->bv_len,
                                        bvec->bv_offset);
+                       nents++;
                }
 
                /* Issue the read/write. */
index 8efdfaa44a59231165bf2307d8dbedaf59cd9ca0..ce79a590b45bff7c9c9e3e5ed206f1d7342c1a55 100644 (file)
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        struct nvme_command *cmnd;
        struct nvme_iod *iod;
        enum dma_data_direction dma_dir;
-       int cmdid, length, result = -ENOMEM;
+       int cmdid, length, result;
        u16 control;
        u32 dsmgmt;
        int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                        return result;
        }
 
+       result = -ENOMEM;
        iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
        if (!iod)
                goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 
                if (timeout && !time_after(now, info[cmdid].timeout))
                        continue;
+               if (info[cmdid].ctx == CMD_CTX_CANCELLED)
+                       continue;
                dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
                ctx = cancel_cmdid(nvmeq, cmdid, &fn);
                fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 
        if (addr & 3)
                return ERR_PTR(-EINVAL);
-       if (!length)
+       if (!length || length > INT_MAX - PAGE_SIZE)
                return ERR_PTR(-EINVAL);
 
        offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
        sg_init_table(sg, count);
        for (i = 0; i < count; i++) {
                sg_set_page(&sg[i], pages[i],
-                               min_t(int, length, PAGE_SIZE - offset), offset);
+                           min_t(unsigned, length, PAGE_SIZE - offset),
+                           offset);
                length -= (PAGE_SIZE - offset);
                offset = 0;
        }
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
                nvme_free_iod(dev, iod);
        }
 
-       if (!status && copy_to_user(&ucmd->result, &cmd.result,
+       if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
                                                        sizeof(cmd.result)))
                status = -EFAULT;
 
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-       int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
+       struct pci_dev *pdev = dev->pci_dev;
+       int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
 
        nr_io_queues = num_online_cpus();
        result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (result < nr_io_queues)
                nr_io_queues = result;
 
+       q_count = nr_io_queues;
        /* Deregister the admin queue's interrupt */
        free_irq(dev->entry[0].vector, dev->queues[0]);
 
        db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
        if (db_bar_size > 8192) {
                iounmap(dev->bar);
-               dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
-                                                               db_bar_size);
+               dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
                dev->dbs = ((void __iomem *)dev->bar) + 4096;
                dev->queues[0]->q_db = dev->dbs;
        }
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        for (i = 0; i < nr_io_queues; i++)
                dev->entry[i].entry = i;
        for (;;) {
-               result = pci_enable_msix(dev->pci_dev, dev->entry,
-                                                               nr_io_queues);
+               result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
                if (result == 0) {
                        break;
                } else if (result > 0) {
                        nr_io_queues = result;
                        continue;
                } else {
-                       nr_io_queues = 1;
+                       nr_io_queues = 0;
                        break;
                }
        }
 
+       if (nr_io_queues == 0) {
+               nr_io_queues = q_count;
+               for (;;) {
+                       result = pci_enable_msi_block(pdev, nr_io_queues);
+                       if (result == 0) {
+                               for (i = 0; i < nr_io_queues; i++)
+                                       dev->entry[i].vector = i + pdev->irq;
+                               break;
+                       } else if (result > 0) {
+                               nr_io_queues = result;
+                               continue;
+                       } else {
+                               nr_io_queues = 1;
+                               break;
+                       }
+               }
+       }
+
        result = queue_request_irq(dev, dev->queues[0], "nvme admin");
        /* XXX: handle failure here */
 
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
 {
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
        nvme_dev_remove(dev);
-       pci_disable_msix(dev->pci_dev);
+       if (dev->pci_dev->msi_enabled)
+               pci_disable_msi(dev->pci_dev);
+       else if (dev->pci_dev->msix_enabled)
+               pci_disable_msix(dev->pci_dev);
        iounmap(dev->bar);
        nvme_release_instance(dev);
        nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        INIT_LIST_HEAD(&dev->namespaces);
        dev->pci_dev = pdev;
        pci_set_drvdata(pdev, dev);
-       dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
-       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+       else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       else
+               goto disable;
+
        result = nvme_set_instance(dev);
        if (result)
                goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  unmap:
        iounmap(dev->bar);
  disable_msix:
-       pci_disable_msix(pdev);
+       if (dev->pci_dev->msi_enabled)
+               pci_disable_msi(dev->pci_dev);
+       else if (dev->pci_dev->msix_enabled)
+               pci_disable_msix(dev->pci_dev);
        nvme_release_instance(dev);
        nvme_release_prp_pools(dev);
  disable:
index fed54b039893474bfa39cbda0004f46ab6c78a18..102de2f52b5c5fa7b4dd877121f156041bb20be7 100644 (file)
@@ -44,7 +44,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/version.h>
 #include <scsi/sg.h>
 #include <scsi/scsi.h>
 
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
        }
 }
 
-static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        u8 *mode_page, u8 page_code)
 {
        int res = SNTI_TRANSLATION_SUCCESS;
index 3c08983e600a0a15e1380e9de1e6f50714fe3976..f5d0ea11d9fda8a4f1b8ad3f23d285bb7887250b 100644 (file)
@@ -83,7 +83,8 @@
 
 #define MAX_SPEED 0xffff
 
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
+#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
+                       ~(sector_t)((pd)->settings.size - 1))
 
 static DEFINE_MUTEX(pktcdvd_mutex);
 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
index ca63104136e0db46d0248aa290c355483989ec2e..3063452e55daf5dafd5b28bb536f81109212fc2e 100644 (file)
 #define        SECTOR_SHIFT    9
 #define        SECTOR_SIZE     (1ULL << SECTOR_SHIFT)
 
+/*
+ * Increment the given counter and return its updated value.
+ * If the counter is already 0, it will not be incremented.
+ * If the counter is already at its maximum value, -EINVAL is
+ * returned without updating it.
+ */
+static int atomic_inc_return_safe(atomic_t *v)
+{
+       unsigned int counter;
+
+       counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+       if (counter <= (unsigned int)INT_MAX)
+               return (int)counter;
+
+       atomic_dec(v);
+
+       return -EINVAL;
+}
+
+/* Decrement the counter.  Return the resulting value, or -EINVAL */
+static int atomic_dec_return_safe(atomic_t *v)
+{
+       int counter;
+
+       counter = atomic_dec_return(v);
+       if (counter >= 0)
+               return counter;
+
+       atomic_inc(v);
+
+       return -EINVAL;
+}
+
 #define RBD_DRV_NAME "rbd"
 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
 
  * block device image metadata (in-memory version)
  */
 struct rbd_image_header {
-       /* These four fields never change for a given rbd image */
+       /* These six fields never change for a given rbd image */
        char *object_prefix;
-       u64 features;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
+       u64 stripe_unit;
+       u64 stripe_count;
+       u64 features;           /* Might be changeable someday? */
 
        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
-       char *snap_names;
-       u64 *snap_sizes;
-
-       u64 stripe_unit;
-       u64 stripe_count;
+       char *snap_names;       /* format 1 only */
+       u64 *snap_sizes;        /* format 1 only */
 };
 
 /*
@@ -225,6 +257,7 @@ struct rbd_obj_request {
                };
        };
        struct page             **copyup_pages;
+       u32                     copyup_page_count;
 
        struct ceph_osd_request *osd_req;
 
@@ -257,6 +290,7 @@ struct rbd_img_request {
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
+       u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
@@ -311,6 +345,7 @@ struct rbd_device {
 
        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
+       atomic_t                parent_ref;
        struct rbd_device       *parent;
 
        /* protects updating the header */
@@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static void rbd_spec_put(struct rbd_spec *spec);
 
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -482,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
 };
 
 /*
- * Initialize an rbd client instance.
- * We own *ceph_opts.
+ * Initialize an rbd client instance.  Success or not, this function
+ * consumes ceph_opts.
  */
 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 {
@@ -638,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
 
 /*
  * Get a ceph client with specific addr and configuration, if one does
- * not exist create it.
+ * not exist create it.  Either way, ceph_opts is consumed by this
+ * function.
  */
 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 {
@@ -726,88 +764,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
 }
 
 /*
- * Create a new header structure, translate header format from the on-disk
- * header.
+ * Fill an rbd image header with information from the given format 1
+ * on-disk header.
  */
-static int rbd_header_from_disk(struct rbd_image_header *header,
+static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
 {
+       struct rbd_image_header *header = &rbd_dev->header;
+       bool first_time = header->object_prefix == NULL;
+       struct ceph_snap_context *snapc;
+       char *object_prefix = NULL;
+       char *snap_names = NULL;
+       u64 *snap_sizes = NULL;
        u32 snap_count;
-       size_t len;
        size_t size;
+       int ret = -ENOMEM;
        u32 i;
 
-       memset(header, 0, sizeof (*header));
+       /* Allocate this now to avoid having to handle failure below */
 
-       snap_count = le32_to_cpu(ondisk->snap_count);
+       if (first_time) {
+               size_t len;
 
-       len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
-       header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
-       if (!header->object_prefix)
-               return -ENOMEM;
-       memcpy(header->object_prefix, ondisk->object_prefix, len);
-       header->object_prefix[len] = '\0';
+               len = strnlen(ondisk->object_prefix,
+                               sizeof (ondisk->object_prefix));
+               object_prefix = kmalloc(len + 1, GFP_KERNEL);
+               if (!object_prefix)
+                       return -ENOMEM;
+               memcpy(object_prefix, ondisk->object_prefix, len);
+               object_prefix[len] = '\0';
+       }
+
+       /* Allocate the snapshot context and fill it in */
 
+       snap_count = le32_to_cpu(ondisk->snap_count);
+       snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
+       if (!snapc)
+               goto out_err;
+       snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
+               struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 
-               /* Save a copy of the snapshot names */
+               /* We'll keep a copy of the snapshot names... */
 
-               if (snap_names_len > (u64) SIZE_MAX)
-                       return -EIO;
-               header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
-               if (!header->snap_names)
+               if (snap_names_len > (u64)SIZE_MAX)
+                       goto out_2big;
+               snap_names = kmalloc(snap_names_len, GFP_KERNEL);
+               if (!snap_names)
+                       goto out_err;
+
+               /* ...as well as the array of their sizes. */
+
+               size = snap_count * sizeof (*header->snap_sizes);
+               snap_sizes = kmalloc(size, GFP_KERNEL);
+               if (!snap_sizes)
                        goto out_err;
+
                /*
-                * Note that rbd_dev_v1_header_read() guarantees
-                * the ondisk buffer we're working with has
+                * Copy the names, and fill in each snapshot's id
+                * and size.
+                *
+                * Note that rbd_dev_v1_header_info() guarantees the
+                * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, this memcpy() is safe.
                 */
-               memcpy(header->snap_names, &ondisk->snaps[snap_count],
-                       snap_names_len);
+               memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
+               snaps = ondisk->snaps;
+               for (i = 0; i < snap_count; i++) {
+                       snapc->snaps[i] = le64_to_cpu(snaps[i].id);
+                       snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
+               }
+       }
 
-               /* Record each snapshot's size */
+       /* We won't fail any more, fill in the header */
 
-               size = snap_count * sizeof (*header->snap_sizes);
-               header->snap_sizes = kmalloc(size, GFP_KERNEL);
-               if (!header->snap_sizes)
-                       goto out_err;
-               for (i = 0; i < snap_count; i++)
-                       header->snap_sizes[i] =
-                               le64_to_cpu(ondisk->snaps[i].image_size);
+       down_write(&rbd_dev->header_rwsem);
+       if (first_time) {
+               header->object_prefix = object_prefix;
+               header->obj_order = ondisk->options.order;
+               header->crypt_type = ondisk->options.crypt_type;
+               header->comp_type = ondisk->options.comp_type;
+               /* The rest aren't used for format 1 images */
+               header->stripe_unit = 0;
+               header->stripe_count = 0;
+               header->features = 0;
        } else {
-               header->snap_names = NULL;
-               header->snap_sizes = NULL;
+               ceph_put_snap_context(header->snapc);
+               kfree(header->snap_names);
+               kfree(header->snap_sizes);
        }
 
-       header->features = 0;   /* No features support in v1 images */
-       header->obj_order = ondisk->options.order;
-       header->crypt_type = ondisk->options.crypt_type;
-       header->comp_type = ondisk->options.comp_type;
-
-       /* Allocate and fill in the snapshot context */
+       /* The remaining fields always get updated (when we refresh) */
 
        header->image_size = le64_to_cpu(ondisk->image_size);
+       header->snapc = snapc;
+       header->snap_names = snap_names;
+       header->snap_sizes = snap_sizes;
 
-       header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
-       if (!header->snapc)
-               goto out_err;
-       header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
-       for (i = 0; i < snap_count; i++)
-               header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
+       /* Make sure mapping size is consistent with header info */
 
-       return 0;
+       if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
+               if (rbd_dev->mapping.size != header->image_size)
+                       rbd_dev->mapping.size = header->image_size;
 
+       up_write(&rbd_dev->header_rwsem);
+
+       return 0;
+out_2big:
+       ret = -EIO;
 out_err:
-       kfree(header->snap_sizes);
-       header->snap_sizes = NULL;
-       kfree(header->snap_names);
-       header->snap_names = NULL;
-       kfree(header->object_prefix);
-       header->object_prefix = NULL;
+       kfree(snap_sizes);
+       kfree(snap_names);
+       ceph_put_snap_context(snapc);
+       kfree(object_prefix);
 
-       return -ENOMEM;
+       return ret;
 }
 
 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
@@ -934,20 +1007,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 
 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
 {
-       const char *snap_name = rbd_dev->spec->snap_name;
-       u64 snap_id;
+       u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;
 
-       if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
-               snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
-               if (snap_id == CEPH_NOSNAP)
-                       return -ENOENT;
-       } else {
-               snap_id = CEPH_NOSNAP;
-       }
-
        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
@@ -958,11 +1022,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;
 
-       /* If we are mapping a snapshot it must be marked read-only */
-
-       if (snap_id != CEPH_NOSNAP)
-               rbd_dev->mapping.read_only = true;
-
        return 0;
 }
 
@@ -970,14 +1029,6 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
 {
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
-       rbd_dev->mapping.read_only = true;
-}
-
-static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
-{
-       rbd_dev->mapping.size = 0;
-       rbd_dev->mapping.features = 0;
-       rbd_dev->mapping.read_only = true;
 }
 
 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -1342,20 +1393,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
 }
 
-static void rbd_img_request_get(struct rbd_img_request *img_request)
-{
-       dout("%s: img %p (was %d)\n", __func__, img_request,
-               atomic_read(&img_request->kref.refcount));
-       kref_get(&img_request->kref);
-}
-
+static bool img_request_child_test(struct rbd_img_request *img_request);
+static void rbd_parent_request_destroy(struct kref *kref);
 static void rbd_img_request_destroy(struct kref *kref);
 static void rbd_img_request_put(struct rbd_img_request *img_request)
 {
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
-       kref_put(&img_request->kref, rbd_img_request_destroy);
+       if (img_request_child_test(img_request))
+               kref_put(&img_request->kref, rbd_parent_request_destroy);
+       else
+               kref_put(&img_request->kref, rbd_img_request_destroy);
 }
 
 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
@@ -1472,6 +1521,12 @@ static void img_request_child_set(struct rbd_img_request *img_request)
        smp_mb();
 }
 
+static void img_request_child_clear(struct rbd_img_request *img_request)
+{
+       clear_bit(IMG_REQ_CHILD, &img_request->flags);
+       smp_mb();
+}
+
 static bool img_request_child_test(struct rbd_img_request *img_request)
 {
        smp_mb();
@@ -1484,6 +1539,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request)
        smp_mb();
 }
 
+static void img_request_layered_clear(struct rbd_img_request *img_request)
+{
+       clear_bit(IMG_REQ_LAYERED, &img_request->flags);
+       smp_mb();
+}
+
 static bool img_request_layered_test(struct rbd_img_request *img_request)
 {
        smp_mb();
@@ -1827,6 +1888,74 @@ static void rbd_obj_request_destroy(struct kref *kref)
        kmem_cache_free(rbd_obj_request_cache, obj_request);
 }
 
+/* It's OK to call this for a device with no parent */
+
+static void rbd_spec_put(struct rbd_spec *spec);
+static void rbd_dev_unparent(struct rbd_device *rbd_dev)
+{
+       rbd_dev_remove_parent(rbd_dev);
+       rbd_spec_put(rbd_dev->parent_spec);
+       rbd_dev->parent_spec = NULL;
+       rbd_dev->parent_overlap = 0;
+}
+
+/*
+ * Parent image reference counting is used to determine when an
+ * image's parent fields can be safely torn down--after there are no
+ * more in-flight requests to the parent image.  When the last
+ * reference is dropped, cleaning them up is safe.
+ */
+static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
+{
+       int counter;
+
+       if (!rbd_dev->parent_spec)
+               return;
+
+       counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
+       if (counter > 0)
+               return;
+
+       /* Last reference; clean up parent data structures */
+
+       if (!counter)
+               rbd_dev_unparent(rbd_dev);
+       else
+               rbd_warn(rbd_dev, "parent reference underflow\n");
+}
+
+/*
+ * If an image has a non-zero parent overlap, get a reference to its
+ * parent.
+ *
+ * We must get the reference before checking for the overlap to
+ * coordinate properly with zeroing the parent overlap in
+ * rbd_dev_v2_parent_info() when an image gets flattened.  We
+ * drop it again if there is no overlap.
+ *
+ * Returns true if the rbd device has a parent with a non-zero
+ * overlap and a reference for it was successfully taken, or
+ * false otherwise.
+ */
+static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
+{
+       int counter;
+
+       if (!rbd_dev->parent_spec)
+               return false;
+
+       counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+       if (counter > 0 && rbd_dev->parent_overlap)
+               return true;
+
+       /* Image was flattened, but parent is not yet torn down */
+
+       if (counter < 0)
+               rbd_warn(rbd_dev, "parent reference overflow\n");
+
+       return false;
+}
+
 /*
  * Caller is responsible for filling in the list of object requests
  * that comprises the image request, and the Linux request pointer
@@ -1835,8 +1964,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
 static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
-                                       bool write_request,
-                                       bool child_request)
+                                       bool write_request)
 {
        struct rbd_img_request *img_request;
 
@@ -1861,9 +1989,7 @@ static struct rbd_img_request *rbd_img_request_create(
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
-       if (child_request)
-               img_request_child_set(img_request);
-       if (rbd_dev->parent_spec)
+       if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
@@ -1873,9 +1999,6 @@ static struct rbd_img_request *rbd_img_request_create(
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);
 
-       rbd_img_request_get(img_request);       /* Avoid a warning */
-       rbd_img_request_put(img_request);       /* TEMPORARY */
-
        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);
@@ -1897,15 +2020,54 @@ static void rbd_img_request_destroy(struct kref *kref)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);
 
+       if (img_request_layered_test(img_request)) {
+               img_request_layered_clear(img_request);
+               rbd_dev_parent_put(img_request->rbd_dev);
+       }
+
        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);
 
-       if (img_request_child_test(img_request))
-               rbd_obj_request_put(img_request->obj_request);
-
        kmem_cache_free(rbd_img_request_cache, img_request);
 }
 
+static struct rbd_img_request *rbd_parent_request_create(
+                                       struct rbd_obj_request *obj_request,
+                                       u64 img_offset, u64 length)
+{
+       struct rbd_img_request *parent_request;
+       struct rbd_device *rbd_dev;
+
+       rbd_assert(obj_request->img_request);
+       rbd_dev = obj_request->img_request->rbd_dev;
+
+       parent_request = rbd_img_request_create(rbd_dev->parent,
+                                               img_offset, length, false);
+       if (!parent_request)
+               return NULL;
+
+       img_request_child_set(parent_request);
+       rbd_obj_request_get(obj_request);
+       parent_request->obj_request = obj_request;
+
+       return parent_request;
+}
+
+static void rbd_parent_request_destroy(struct kref *kref)
+{
+       struct rbd_img_request *parent_request;
+       struct rbd_obj_request *orig_request;
+
+       parent_request = container_of(kref, struct rbd_img_request, kref);
+       orig_request = parent_request->obj_request;
+
+       parent_request->obj_request = NULL;
+       rbd_obj_request_put(orig_request);
+       img_request_child_clear(parent_request);
+
+       rbd_img_request_destroy(kref);
+}
+
 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
@@ -2114,7 +2276,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
-       u64 length;
+       struct page **pages;
        u32 page_count;
 
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
@@ -2124,12 +2286,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 
        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev);
-       length = (u64)1 << rbd_dev->header.obj_order;
-       page_count = (u32)calc_pages_for(0, length);
 
-       rbd_assert(obj_request->copyup_pages);
-       ceph_release_page_vector(obj_request->copyup_pages, page_count);
+       pages = obj_request->copyup_pages;
+       rbd_assert(pages != NULL);
        obj_request->copyup_pages = NULL;
+       page_count = obj_request->copyup_page_count;
+       rbd_assert(page_count);
+       obj_request->copyup_page_count = 0;
+       ceph_release_page_vector(pages, page_count);
 
        /*
         * We want the transfer count to reflect the size of the
@@ -2153,9 +2317,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
        struct ceph_osd_client *osdc;
        struct rbd_device *rbd_dev;
        struct page **pages;
-       int result;
-       u64 obj_size;
-       u64 xferred;
+       u32 page_count;
+       int img_result;
+       u64 parent_length;
+       u64 offset;
+       u64 length;
 
        rbd_assert(img_request_child_test(img_request));
 
@@ -2164,46 +2330,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
        pages = img_request->copyup_pages;
        rbd_assert(pages != NULL);
        img_request->copyup_pages = NULL;
+       page_count = img_request->copyup_page_count;
+       rbd_assert(page_count);
+       img_request->copyup_page_count = 0;
 
        orig_request = img_request->obj_request;
        rbd_assert(orig_request != NULL);
-       rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
-       result = img_request->result;
-       obj_size = img_request->length;
-       xferred = img_request->xferred;
+       rbd_assert(obj_request_type_valid(orig_request->type));
+       img_result = img_request->result;
+       parent_length = img_request->length;
+       rbd_assert(parent_length == img_request->xferred);
+       rbd_img_request_put(img_request);
 
-       rbd_dev = img_request->rbd_dev;
+       rbd_assert(orig_request->img_request);
+       rbd_dev = orig_request->img_request->rbd_dev;
        rbd_assert(rbd_dev);
-       rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
 
-       rbd_img_request_put(img_request);
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to free the pages
+        * and re-submit the original write request.
+        */
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
 
-       if (result)
-               goto out_err;
+               ceph_release_page_vector(pages, page_count);
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               img_result = rbd_obj_request_submit(osdc, orig_request);
+               if (!img_result)
+                       return;
+       }
 
-       /* Allocate the new copyup osd request for the original request */
+       if (img_result)
+               goto out_err;
 
-       result = -ENOMEM;
-       rbd_assert(!orig_request->osd_req);
+       /*
+        * The original osd request is of no use to us any more.
+        * We need a new one that can hold the two ops in a copyup
+        * request.  Allocate the new copyup osd request for the
+        * original request, and release the old one.
+        */
+       img_result = -ENOMEM;
        osd_req = rbd_osd_req_create_copyup(orig_request);
        if (!osd_req)
                goto out_err;
+       rbd_osd_req_destroy(orig_request->osd_req);
        orig_request->osd_req = osd_req;
        orig_request->copyup_pages = pages;
+       orig_request->copyup_page_count = page_count;
 
        /* Initialize the copyup op */
 
        osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
-       osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
+       osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
                                                false, false);
 
        /* Then the original write request op */
 
+       offset = orig_request->offset;
+       length = orig_request->length;
        osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
-                                       orig_request->offset,
-                                       orig_request->length, 0, 0);
-       osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
-                                       orig_request->length);
+                                       offset, length, 0, 0);
+       if (orig_request->type == OBJ_REQUEST_BIO)
+               osd_req_op_extent_osd_data_bio(osd_req, 1,
+                                       orig_request->bio_list, length);
+       else
+               osd_req_op_extent_osd_data_pages(osd_req, 1,
+                                       orig_request->pages, length,
+                                       offset & ~PAGE_MASK, false, false);
 
        rbd_osd_req_format_write(orig_request);
 
@@ -2211,13 +2405,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
-       result = rbd_obj_request_submit(osdc, orig_request);
-       if (!result)
+       img_result = rbd_obj_request_submit(osdc, orig_request);
+       if (!img_result)
                return;
 out_err:
        /* Record the error code and complete the request */
 
-       orig_request->result = result;
+       orig_request->result = img_result;
        orig_request->xferred = 0;
        obj_request_done_set(orig_request);
        rbd_obj_request_complete(orig_request);
@@ -2249,22 +2443,13 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        int result;
 
        rbd_assert(obj_request_img_data_test(obj_request));
-       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_type_valid(obj_request->type));
 
        img_request = obj_request->img_request;
        rbd_assert(img_request != NULL);
        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev->parent != NULL);
 
-       /*
-        * First things first.  The original osd request is of no
-        * use to use any more, we'll need a new one that can hold
-        * the two ops in a copyup request.  We'll get that later,
-        * but for now we can release the old one.
-        */
-       rbd_osd_req_destroy(obj_request->osd_req);
-       obj_request->osd_req = NULL;
-
        /*
         * Determine the byte range covered by the object in the
         * child image to which the original request was to be sent.
@@ -2295,18 +2480,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        }
 
        result = -ENOMEM;
-       parent_request = rbd_img_request_create(rbd_dev->parent,
-                                               img_offset, length,
-                                               false, true);
+       parent_request = rbd_parent_request_create(obj_request,
+                                               img_offset, length);
        if (!parent_request)
                goto out_err;
-       rbd_obj_request_get(obj_request);
-       parent_request->obj_request = obj_request;
 
        result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
        if (result)
                goto out_err;
        parent_request->copyup_pages = pages;
+       parent_request->copyup_page_count = page_count;
 
        parent_request->callback = rbd_img_obj_parent_read_full_callback;
        result = rbd_img_request_submit(parent_request);
@@ -2314,6 +2497,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
                return 0;
 
        parent_request->copyup_pages = NULL;
+       parent_request->copyup_page_count = 0;
        parent_request->obj_request = NULL;
        rbd_obj_request_put(obj_request);
 out_err:
@@ -2331,6 +2515,7 @@ out_err:
 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_obj_request *orig_request;
+       struct rbd_device *rbd_dev;
        int result;
 
        rbd_assert(!obj_request_img_data_test(obj_request));
@@ -2353,8 +2538,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
                obj_request->xferred, obj_request->length);
        rbd_obj_request_put(obj_request);
 
-       rbd_assert(orig_request);
-       rbd_assert(orig_request->img_request);
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to free the pages
+        * and re-submit the original write request.
+        */
+       rbd_dev = orig_request->img_request->rbd_dev;
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
+
+               rbd_obj_request_put(orig_request);
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               result = rbd_obj_request_submit(osdc, orig_request);
+               if (!result)
+                       return;
+       }
 
        /*
         * Our only purpose here is to determine whether the object
@@ -2512,14 +2710,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
        struct rbd_obj_request *obj_request;
        struct rbd_device *rbd_dev;
        u64 obj_end;
+       u64 img_xferred;
+       int img_result;
 
        rbd_assert(img_request_child_test(img_request));
 
+       /* First get what we need from the image request and release it */
+
        obj_request = img_request->obj_request;
+       img_xferred = img_request->xferred;
+       img_result = img_request->result;
+       rbd_img_request_put(img_request);
+
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to re-submit the
+        * original request.
+        */
        rbd_assert(obj_request);
        rbd_assert(obj_request->img_request);
+       rbd_dev = obj_request->img_request->rbd_dev;
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
 
-       obj_request->result = img_request->result;
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               img_result = rbd_obj_request_submit(osdc, obj_request);
+               if (!img_result)
+                       return;
+       }
+
+       obj_request->result = img_result;
        if (obj_request->result)
                goto out;
 
@@ -2532,7 +2752,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
         */
        rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
        obj_end = obj_request->img_offset + obj_request->length;
-       rbd_dev = obj_request->img_request->rbd_dev;
        if (obj_end > rbd_dev->parent_overlap) {
                u64 xferred = 0;
 
@@ -2540,43 +2759,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
                        xferred = rbd_dev->parent_overlap -
                                        obj_request->img_offset;
 
-               obj_request->xferred = min(img_request->xferred, xferred);
+               obj_request->xferred = min(img_xferred, xferred);
        } else {
-               obj_request->xferred = img_request->xferred;
+               obj_request->xferred = img_xferred;
        }
 out:
-       rbd_img_request_put(img_request);
        rbd_img_obj_request_read_callback(obj_request);
        rbd_obj_request_complete(obj_request);
 }
 
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
 {
-       struct rbd_device *rbd_dev;
        struct rbd_img_request *img_request;
        int result;
 
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request != NULL);
        rbd_assert(obj_request->result == (s32) -ENOENT);
-       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_type_valid(obj_request->type));
 
-       rbd_dev = obj_request->img_request->rbd_dev;
-       rbd_assert(rbd_dev->parent != NULL);
        /* rbd_read_finish(obj_request, obj_request->length); */
-       img_request = rbd_img_request_create(rbd_dev->parent,
+       img_request = rbd_parent_request_create(obj_request,
                                                obj_request->img_offset,
-                                               obj_request->length,
-                                               false, true);
+                                               obj_request->length);
        result = -ENOMEM;
        if (!img_request)
                goto out_err;
 
-       rbd_obj_request_get(obj_request);
-       img_request->obj_request = obj_request;
-
-       result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
-                                       obj_request->bio_list);
+       if (obj_request->type == OBJ_REQUEST_BIO)
+               result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+                                               obj_request->bio_list);
+       else
+               result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
+                                               obj_request->pages);
        if (result)
                goto out_err;
 
@@ -2626,6 +2841,7 @@ out:
 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
 {
        struct rbd_device *rbd_dev = (struct rbd_device *)data;
+       int ret;
 
        if (!rbd_dev)
                return;
@@ -2633,7 +2849,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
        dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
                rbd_dev->header_name, (unsigned long long)notify_id,
                (unsigned int)opcode);
-       (void)rbd_dev_refresh(rbd_dev);
+       ret = rbd_dev_refresh(rbd_dev);
+       if (ret)
+               rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
 
        rbd_obj_notify_ack(rbd_dev, notify_id);
 }
@@ -2642,7 +2860,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
  * Request sync osd watch/unwatch.  The value of "start" determines
  * whether a watch request is being initiated or torn down.
  */
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
 {
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
@@ -2676,7 +2894,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
                                        rbd_dev->watch_request->osd_req);
 
        osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
-                               rbd_dev->watch_event->cookie, 0, start);
+                               rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
        rbd_osd_req_format_write(obj_request);
 
        ret = rbd_obj_request_submit(osdc, obj_request);
@@ -2869,9 +3087,16 @@ static void rbd_request_fn(struct request_queue *q)
                        goto end_request;       /* Shouldn't happen */
                }
 
+               result = -EIO;
+               if (offset + length > rbd_dev->mapping.size) {
+                       rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
+                               offset, length, rbd_dev->mapping.size);
+                       goto end_request;
+               }
+
                result = -ENOMEM;
                img_request = rbd_img_request_create(rbd_dev, offset, length,
-                                                       write_request, false);
+                                                       write_request);
                if (!img_request)
                        goto end_request;
 
@@ -3022,17 +3247,11 @@ out:
 }
 
 /*
- * Read the complete header for the given rbd device.
- *
- * Returns a pointer to a dynamically-allocated buffer containing
- * the complete and validated header.  Caller can pass the address
- * of a variable that will be filled in with the version of the
- * header object at the time it was read.
- *
- * Returns a pointer-coded errno if a failure occurs.
+ * Read the complete header for the given rbd device.  On successful
+ * return, the rbd_dev->header field will contain up-to-date
+ * information about the image.
  */
-static struct rbd_image_header_ondisk *
-rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header_ondisk *ondisk = NULL;
        u32 snap_count = 0;
@@ -3057,22 +3276,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
                size += names_size;
                ondisk = kmalloc(size, GFP_KERNEL);
                if (!ondisk)
-                       return ERR_PTR(-ENOMEM);
+                       return -ENOMEM;
 
                ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
                                       0, size, ondisk);
                if (ret < 0)
-                       goto out_err;
+                       goto out;
                if ((size_t)ret < size) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "short header read (want %zd got %d)",
                                size, ret);
-                       goto out_err;
+                       goto out;
                }
                if (!rbd_dev_ondisk_valid(ondisk)) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "invalid header");
-                       goto out_err;
+                       goto out;
                }
 
                names_size = le64_to_cpu(ondisk->snap_names_len);
@@ -3080,85 +3299,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
                snap_count = le32_to_cpu(ondisk->snap_count);
        } while (snap_count != want_count);
 
-       return ondisk;
-
-out_err:
-       kfree(ondisk);
-
-       return ERR_PTR(ret);
-}
-
-/*
- * reload the ondisk the header
- */
-static int rbd_read_header(struct rbd_device *rbd_dev,
-                          struct rbd_image_header *header)
-{
-       struct rbd_image_header_ondisk *ondisk;
-       int ret;
-
-       ondisk = rbd_dev_v1_header_read(rbd_dev);
-       if (IS_ERR(ondisk))
-               return PTR_ERR(ondisk);
-       ret = rbd_header_from_disk(header, ondisk);
+       ret = rbd_header_from_disk(rbd_dev, ondisk);
+out:
        kfree(ondisk);
 
        return ret;
 }
 
-static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
-{
-       if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
-               return;
-
-       if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
-               sector_t size;
-
-               rbd_dev->mapping.size = rbd_dev->header.image_size;
-               size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
-               dout("setting size to %llu sectors", (unsigned long long)size);
-               set_capacity(rbd_dev->disk, size);
-       }
-}
-
-/*
- * only read the first part of the ondisk header, without the snaps info
- */
-static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
-{
-       int ret;
-       struct rbd_image_header h;
-
-       ret = rbd_read_header(rbd_dev, &h);
-       if (ret < 0)
-               return ret;
-
-       down_write(&rbd_dev->header_rwsem);
-
-       /* Update image size, and check for resize of mapped image */
-       rbd_dev->header.image_size = h.image_size;
-       rbd_update_mapping_size(rbd_dev);
-
-       /* rbd_dev->header.object_prefix shouldn't change */
-       kfree(rbd_dev->header.snap_sizes);
-       kfree(rbd_dev->header.snap_names);
-       /* osd requests may still refer to snapc */
-       ceph_put_snap_context(rbd_dev->header.snapc);
-
-       rbd_dev->header.image_size = h.image_size;
-       rbd_dev->header.snapc = h.snapc;
-       rbd_dev->header.snap_names = h.snap_names;
-       rbd_dev->header.snap_sizes = h.snap_sizes;
-       /* Free the extra copy of the object prefix */
-       if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
-               rbd_warn(rbd_dev, "object prefix changed (ignoring)");
-       kfree(h.object_prefix);
-
-       up_write(&rbd_dev->header_rwsem);
-
-       return ret;
-}
-
 /*
  * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
  * has disappeared from the (just updated) snapshot context.
@@ -3180,26 +3327,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 {
-       u64 image_size;
+       u64 mapping_size;
        int ret;
 
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
-       image_size = rbd_dev->header.image_size;
+       mapping_size = rbd_dev->mapping.size;
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        if (rbd_dev->image_format == 1)
-               ret = rbd_dev_v1_refresh(rbd_dev);
+               ret = rbd_dev_v1_header_info(rbd_dev);
        else
-               ret = rbd_dev_v2_refresh(rbd_dev);
+               ret = rbd_dev_v2_header_info(rbd_dev);
 
        /* If it's a mapped snapshot, validate its EXISTS flag */
 
        rbd_exists_validate(rbd_dev);
        mutex_unlock(&ctl_mutex);
-       if (ret)
-               rbd_warn(rbd_dev, "got notification but failed to "
-                          " update snaps: %d\n", ret);
-       if (image_size != rbd_dev->header.image_size)
+       if (mapping_size != rbd_dev->mapping.size) {
+               sector_t size;
+
+               size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+               dout("setting size to %llu sectors", (unsigned long long)size);
+               set_capacity(rbd_dev->disk, size);
                revalidate_disk(rbd_dev->disk);
+       }
 
        return ret;
 }
@@ -3403,6 +3553,8 @@ static ssize_t rbd_image_refresh(struct device *dev,
        int ret;
 
        ret = rbd_dev_refresh(rbd_dev);
+       if (ret)
+               rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
 
        return ret < 0 ? ret : size;
 }
@@ -3501,6 +3653,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
 
        spin_lock_init(&rbd_dev->lock);
        rbd_dev->flags = 0;
+       atomic_set(&rbd_dev->parent_ref, 0);
        INIT_LIST_HEAD(&rbd_dev->node);
        init_rwsem(&rbd_dev->header_rwsem);
 
@@ -3650,6 +3803,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        __le64 snapid;
        void *p;
        void *end;
+       u64 pool_id;
        char *image_id;
        u64 overlap;
        int ret;
@@ -3680,18 +3834,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        p = reply_buf;
        end = reply_buf + ret;
        ret = -ERANGE;
-       ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
-       if (parent_spec->pool_id == CEPH_NOPOOL)
+       ceph_decode_64_safe(&p, end, pool_id, out_err);
+       if (pool_id == CEPH_NOPOOL) {
+               /*
+                * Either the parent never existed, or we have
+                * record of it but the image got flattened so it no
+                * longer has a parent.  When the parent of a
+                * layered image disappears we immediately set the
+                * overlap to 0.  The effect of this is that all new
+                * requests will be treated as if the image had no
+                * parent.
+                */
+               if (rbd_dev->parent_overlap) {
+                       rbd_dev->parent_overlap = 0;
+                       smp_mb();
+                       rbd_dev_parent_put(rbd_dev);
+                       pr_info("%s: clone image has been flattened\n",
+                               rbd_dev->disk->disk_name);
+               }
+
                goto out;       /* No parent?  No problem. */
+       }
 
        /* The ceph file layout needs to fit pool id in 32 bits */
 
        ret = -EIO;
-       if (parent_spec->pool_id > (u64)U32_MAX) {
+       if (pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
-                       (unsigned long long)parent_spec->pool_id, U32_MAX);
+                       (unsigned long long)pool_id, U32_MAX);
                goto out_err;
        }
+       parent_spec->pool_id = pool_id;
 
        image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(image_id)) {
@@ -3702,9 +3875,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
        ceph_decode_64_safe(&p, end, overlap, out_err);
 
-       rbd_dev->parent_overlap = overlap;
-       rbd_dev->parent_spec = parent_spec;
-       parent_spec = NULL;     /* rbd_dev now owns this */
+       if (overlap) {
+               rbd_spec_put(rbd_dev->parent_spec);
+               rbd_dev->parent_spec = parent_spec;
+               parent_spec = NULL;     /* rbd_dev now owns this */
+               rbd_dev->parent_overlap = overlap;
+       } else {
+               rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
+       }
 out:
        ret = 0;
 out_err:
@@ -4002,6 +4180,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
        for (i = 0; i < snap_count; i++)
                snapc->snaps[i] = ceph_decode_64(&p);
 
+       ceph_put_snap_context(rbd_dev->header.snapc);
        rbd_dev->header.snapc = snapc;
 
        dout("  snap context seq = %llu, snap_count = %u\n",
@@ -4053,21 +4232,56 @@ out:
        return snap_name;
 }
 
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 {
+       bool first_time = rbd_dev->header.object_prefix == NULL;
        int ret;
 
        down_write(&rbd_dev->header_rwsem);
 
+       if (first_time) {
+               ret = rbd_dev_v2_header_onetime(rbd_dev);
+               if (ret)
+                       goto out;
+       }
+
+       /*
+        * If the image supports layering, get the parent info.  We
+        * need to probe the first time regardless.  Thereafter we
+        * only need to do so if there's a parent, to see if it has
+        * disappeared due to the mapped image getting flattened.
+        */
+       if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
+                       (first_time || rbd_dev->parent_spec)) {
+               bool warn;
+
+               ret = rbd_dev_v2_parent_info(rbd_dev);
+               if (ret)
+                       goto out;
+
+               /*
+                * Print a warning if this is the initial probe and
+                * the image has a parent.  Don't print it if the
+                * image now being probed is itself a parent.  We
+                * can tell at this point because we won't know its
+                * pool name yet (just its pool id).
+                */
+               warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
+               if (first_time && warn)
+                       rbd_warn(rbd_dev, "WARNING: kernel layering "
+                                       "is EXPERIMENTAL!");
+       }
+
        ret = rbd_dev_v2_image_size(rbd_dev);
        if (ret)
                goto out;
-       rbd_update_mapping_size(rbd_dev);
+
+       if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
+               if (rbd_dev->mapping.size != rbd_dev->header.image_size)
+                       rbd_dev->mapping.size = rbd_dev->header.image_size;
 
        ret = rbd_dev_v2_snap_context(rbd_dev);
        dout("rbd_dev_v2_snap_context returned %d\n", ret);
-       if (ret)
-               goto out;
 out:
        up_write(&rbd_dev->header_rwsem);
 
@@ -4484,16 +4698,18 @@ out:
        return ret;
 }
 
-/* Undo whatever state changes are made by v1 or v2 image probe */
-
+/*
+ * Undo whatever state changes are made by v1 or v2 header info
+ * call.
+ */
 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header *header;
 
-       rbd_dev_remove_parent(rbd_dev);
-       rbd_spec_put(rbd_dev->parent_spec);
-       rbd_dev->parent_spec = NULL;
-       rbd_dev->parent_overlap = 0;
+       /* Drop parent reference unless it's already been done (or none) */
+
+       if (rbd_dev->parent_overlap)
+               rbd_dev_parent_put(rbd_dev);
 
        /* Free dynamic fields from the header, then zero it out */
 
@@ -4505,72 +4721,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
        memset(header, 0, sizeof (*header));
 }
 
-static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
 {
        int ret;
 
-       /* Populate rbd image metadata */
-
-       ret = rbd_read_header(rbd_dev, &rbd_dev->header);
-       if (ret < 0)
-               goto out_err;
-
-       /* Version 1 images have no parent (no layering) */
-
-       rbd_dev->parent_spec = NULL;
-       rbd_dev->parent_overlap = 0;
-
-       dout("discovered version 1 image, header name is %s\n",
-               rbd_dev->header_name);
-
-       return 0;
-
-out_err:
-       kfree(rbd_dev->header_name);
-       rbd_dev->header_name = NULL;
-       kfree(rbd_dev->spec->image_id);
-       rbd_dev->spec->image_id = NULL;
-
-       return ret;
-}
-
-static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
-{
-       int ret;
-
-       ret = rbd_dev_v2_image_size(rbd_dev);
-       if (ret)
-               goto out_err;
-
-       /* Get the object prefix (a.k.a. block_name) for the image */
-
        ret = rbd_dev_v2_object_prefix(rbd_dev);
        if (ret)
                goto out_err;
 
-       /* Get the and check features for the image */
-
+       /*
+        * Get and check the features for the image.  Currently the
+        * features are assumed to never change.
+        */
        ret = rbd_dev_v2_features(rbd_dev);
        if (ret)
                goto out_err;
 
-       /* If the image supports layering, get the parent info */
-
-       if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
-               ret = rbd_dev_v2_parent_info(rbd_dev);
-               if (ret)
-                       goto out_err;
-
-               /*
-                * Don't print a warning for parent images.  We can
-                * tell this point because we won't know its pool
-                * name yet (just its pool id).
-                */
-               if (rbd_dev->spec->pool_name)
-                       rbd_warn(rbd_dev, "WARNING: kernel layering "
-                                       "is EXPERIMENTAL!");
-       }
-
        /* If the image supports fancy striping, get its parameters */
 
        if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
@@ -4578,28 +4744,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
                if (ret < 0)
                        goto out_err;
        }
-
-       /* crypto and compression type aren't (yet) supported for v2 images */
-
-       rbd_dev->header.crypt_type = 0;
-       rbd_dev->header.comp_type = 0;
-
-       /* Get the snapshot context, plus the header version */
-
-       ret = rbd_dev_v2_snap_context(rbd_dev);
-       if (ret)
-               goto out_err;
-
-       dout("discovered version 2 image, header name is %s\n",
-               rbd_dev->header_name);
+       /* No support for crypto and compression type in format 2 images */
 
        return 0;
 out_err:
-       rbd_dev->parent_overlap = 0;
-       rbd_spec_put(rbd_dev->parent_spec);
-       rbd_dev->parent_spec = NULL;
-       kfree(rbd_dev->header_name);
-       rbd_dev->header_name = NULL;
+       rbd_dev->header.features = 0;
        kfree(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;
 
@@ -4628,15 +4777,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
        if (!parent)
                goto out_err;
 
-       ret = rbd_dev_image_probe(parent);
+       ret = rbd_dev_image_probe(parent, false);
        if (ret < 0)
                goto out_err;
        rbd_dev->parent = parent;
+       atomic_set(&rbd_dev->parent_ref, 1);
 
        return 0;
 out_err:
        if (parent) {
-               rbd_spec_put(rbd_dev->parent_spec);
+               rbd_dev_unparent(rbd_dev);
                kfree(rbd_dev->header_name);
                rbd_dev_destroy(parent);
        } else {
@@ -4651,10 +4801,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 {
        int ret;
 
-       ret = rbd_dev_mapping_set(rbd_dev);
-       if (ret)
-               return ret;
-
        /* generate unique id: find highest unique id, add one */
        rbd_dev_id_get(rbd_dev);
 
@@ -4676,13 +4822,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        if (ret)
                goto err_out_blkdev;
 
-       ret = rbd_bus_add_dev(rbd_dev);
+       ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;
+       set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+
+       ret = rbd_bus_add_dev(rbd_dev);
+       if (ret)
+               goto err_out_mapping;
 
        /* Everything's ready.  Announce the disk to the world. */
 
-       set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        add_disk(rbd_dev->disk);
 
@@ -4691,6 +4841,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
        return ret;
 
+err_out_mapping:
+       rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
        rbd_free_disk(rbd_dev);
 err_out_blkdev:
@@ -4731,12 +4883,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
 
 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
 {
-       int ret;
-
        rbd_dev_unprobe(rbd_dev);
-       ret = rbd_dev_header_watch_sync(rbd_dev, 0);
-       if (ret)
-               rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
        rbd_dev->image_format = 0;
@@ -4748,18 +4895,20 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
 
 /*
  * Probe for the existence of the header object for the given rbd
- * device.  For format 2 images this includes determining the image
- * id.
+ * device.  If this image is the one being mapped (i.e., not a
+ * parent), initiate a watch on its header object before using that
+ * object to get detailed information about the rbd image.
  */
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
 {
        int ret;
        int tmp;
 
        /*
-        * Get the id from the image id object.  If it's not a
-        * format 2 image, we'll get ENOENT back, and we'll assume
-        * it's a format 1 image.
+        * Get the id from the image id object.  Unless there's an
+        * error, rbd_dev->spec->image_id will be filled in with
+        * a dynamically-allocated string, and rbd_dev->image_format
+        * will be set to either 1 or 2.
         */
        ret = rbd_dev_image_id(rbd_dev);
        if (ret)
@@ -4771,14 +4920,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
        if (ret)
                goto err_out_format;
 
-       ret = rbd_dev_header_watch_sync(rbd_dev, 1);
-       if (ret)
-               goto out_header_name;
+       if (mapping) {
+               ret = rbd_dev_header_watch_sync(rbd_dev, true);
+               if (ret)
+                       goto out_header_name;
+       }
 
        if (rbd_dev->image_format == 1)
-               ret = rbd_dev_v1_probe(rbd_dev);
+               ret = rbd_dev_v1_header_info(rbd_dev);
        else
-               ret = rbd_dev_v2_probe(rbd_dev);
+               ret = rbd_dev_v2_header_info(rbd_dev);
        if (ret)
                goto err_out_watch;
 
@@ -4787,15 +4938,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
                goto err_out_probe;
 
        ret = rbd_dev_probe_parent(rbd_dev);
-       if (!ret)
-               return 0;
+       if (ret)
+               goto err_out_probe;
+
+       dout("discovered format %u image, header name is %s\n",
+               rbd_dev->image_format, rbd_dev->header_name);
 
+       return 0;
 err_out_probe:
        rbd_dev_unprobe(rbd_dev);
 err_out_watch:
-       tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
-       if (tmp)
-               rbd_warn(rbd_dev, "unable to tear down watch request\n");
+       if (mapping) {
+               tmp = rbd_dev_header_watch_sync(rbd_dev, false);
+               if (tmp)
+                       rbd_warn(rbd_dev, "unable to tear down "
+                                       "watch request (%d)\n", tmp);
+       }
 out_header_name:
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
@@ -4819,6 +4977,7 @@ static ssize_t rbd_add(struct bus_type *bus,
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        struct ceph_osd_client *osdc;
+       bool read_only;
        int rc = -ENOMEM;
 
        if (!try_module_get(THIS_MODULE))
@@ -4828,13 +4987,15 @@ static ssize_t rbd_add(struct bus_type *bus,
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
+       read_only = rbd_opts->read_only;
+       kfree(rbd_opts);
+       rbd_opts = NULL;        /* done with this */
 
        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
                rc = PTR_ERR(rbdc);
                goto err_out_args;
        }
-       ceph_opts = NULL;       /* rbd_dev client now owns this */
 
        /* pick the pool */
        osdc = &rbdc->client->osdc;
@@ -4858,27 +5019,29 @@ static ssize_t rbd_add(struct bus_type *bus,
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */
 
-       rbd_dev->mapping.read_only = rbd_opts->read_only;
-       kfree(rbd_opts);
-       rbd_opts = NULL;        /* done with this */
-
-       rc = rbd_dev_image_probe(rbd_dev);
+       rc = rbd_dev_image_probe(rbd_dev, true);
        if (rc < 0)
                goto err_out_rbd_dev;
 
+       /* If we are mapping a snapshot it must be marked read-only */
+
+       if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+               read_only = true;
+       rbd_dev->mapping.read_only = read_only;
+
        rc = rbd_dev_device_setup(rbd_dev);
-       if (!rc)
-               return count;
+       if (rc) {
+               rbd_dev_image_release(rbd_dev);
+               goto err_out_module;
+       }
+
+       return count;
 
-       rbd_dev_image_release(rbd_dev);
 err_out_rbd_dev:
        rbd_dev_destroy(rbd_dev);
 err_out_client:
        rbd_put_client(rbdc);
 err_out_args:
-       if (ceph_opts)
-               ceph_destroy_options(ceph_opts);
-       kfree(rbd_opts);
        rbd_spec_put(spec);
 err_out_module:
        module_put(THIS_MODULE);
@@ -4911,7 +5074,7 @@ static void rbd_dev_device_release(struct device *dev)
 
        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-       rbd_dev_clear_mapping(rbd_dev);
+       rbd_dev_mapping_clear(rbd_dev);
        unregister_blkdev(rbd_dev->major, rbd_dev->name);
        rbd_dev->major = 0;
        rbd_dev_id_put(rbd_dev);
@@ -4978,10 +5141,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
        spin_unlock_irq(&rbd_dev->lock);
        if (ret < 0)
                goto done;
-       ret = count;
        rbd_bus_del_dev(rbd_dev);
+       ret = rbd_dev_header_watch_sync(rbd_dev, false);
+       if (ret)
+               rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
        rbd_dev_image_release(rbd_dev);
        module_put(THIS_MODULE);
+       ret = count;
 done:
        mutex_unlock(&ctl_mutex);
 
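The parent reference counting added to rbd.c above relies on two saturating helpers, atomic_inc_return_safe() and atomic_dec_return_safe(), which are defined elsewhere in drivers/block/rbd.c and do not appear in the hunks above. The following is only a sketch of the semantics implied by how rbd_dev_parent_get() and rbd_dev_parent_put() interpret the return values (an increment is refused once the count has reached zero, and overflow or underflow is reported as a negative value); it is an assumed illustration, not the verbatim rbd.c definitions.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch only: saturating reference-count helpers matching the callers above. */

static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        /* Don't take a new reference once the count has already dropped to zero. */
        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;    /* old value; 0 means no reference was taken */

        atomic_dec(v);                  /* count had overflowed; undo the increment */

        return -EINVAL;
}

static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;         /* new value; 0 means the last reference dropped */

        atomic_inc(v);                  /* underflow; restore the count and report it */

        return -EINVAL;
}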
index f8ef15f37c5ec67eb6a35151b756c7496a1a666d..3fd130fdfbc1f742aa706ea96aa5d20361d05d37 100644 (file)
@@ -1160,8 +1160,7 @@ static int ace_probe(struct platform_device *dev)
        dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
 
        /* device id and bus width */
-       of_property_read_u32(dev->dev.of_node, "port-number", &id);
-       if (id < 0)
+       if (of_property_read_u32(dev->dev.of_node, "port-number", &id))
                id = 0;
        if (of_find_property(dev->dev.of_node, "8-bit", NULL))
                bus_width = ACE_BUS_WIDTH_8;
index 4ca35e8a5d8c60a73a375dd132d0bbe196810a23..19a12ac64a9ec966149286a84a2f0db5e3e0ee60 100644 (file)
@@ -167,11 +167,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
        clk_prepare_enable(mxc_rng->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               err = -ENOENT;
-               goto err_region;
-       }
-
        mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mxc_rng->mem)) {
                err = PTR_ERR(mxc_rng->mem);
@@ -189,7 +184,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
        return 0;
 
 err_ioremap:
-err_region:
        clk_disable_unprepare(mxc_rng->clk);
 
 out:
index 749dc16ca2cc084b81c78c47864ce430489e7e08..d2903e7722704843ac254390046b65b5b5540d6c 100644 (file)
@@ -119,11 +119,6 @@ static int omap_rng_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, priv);
 
        priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!priv->mem_res) {
-               ret = -ENOENT;
-               goto err_ioremap;
-       }
-
        priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
index cdd4c09fda963b2de1e1e3371090a93625d0b771..a22a7a502740ff42a4ce4413b620026d228909ee 100644 (file)
@@ -95,9 +95,9 @@ struct si_sm_data {
        enum bt_states  state;
        unsigned char   seq;            /* BT sequence number */
        struct si_sm_io *io;
-       unsigned char   write_data[IPMI_MAX_MSG_LENGTH];
+       unsigned char   write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
        int             write_count;
-       unsigned char   read_data[IPMI_MAX_MSG_LENGTH];
+       unsigned char   read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
        int             read_count;
        int             truncated;
        long            timeout;        /* microseconds countdown */
index 9eb360ff8cab0cc19178b6412ade5f4cf3d40e18..d5a5f020810afcdf8f6d5d19d1b45960c83589d5 100644 (file)
@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                return ipmi_ioctl(filep, cmd, arg);
        }
 }
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+                                      unsigned long arg)
+{
+       int ret;
+
+       mutex_lock(&ipmi_mutex);
+       ret = compat_ipmi_ioctl(filep, cmd, arg);
+       mutex_unlock(&ipmi_mutex);
+
+       return ret;
+}
 #endif
 
 static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ipmi_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
-       .compat_ioctl   = compat_ipmi_ioctl,
+       .compat_ioctl   = unlocked_compat_ipmi_ioctl,
 #endif
        .open           = ipmi_open,
        .release        = ipmi_release,
index 4d439d2fcfd685a7e3e969aee8d7cef5e1cf14ee..4445fa164a2de5de14330af41847ad2bf1ef1c3d 100644 (file)
@@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
-       entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+       entry->name = kstrdup(name, GFP_KERNEL);
        if (!entry->name) {
                kfree(entry);
                return -ENOMEM;
        }
-       strcpy(entry->name, name);
 
        file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
        if (!file) {
index 313538abe63ceaef860b0a9b6a86ceb64a134907..af4b23ffc5a659c4d515e706c66fe80b202c056e 100644 (file)
@@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
-                       dev_warn(smi_info->dev, "Could not enable interrupts"
-                                ", failed get, using polled mode.\n");
+                       dev_warn(smi_info->dev,
+                                "Couldn't get irq info: %x.\n", msg[2]);
+                       dev_warn(smi_info->dev,
+                                "Maybe ok, but ipmi might run very slowly.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
 
                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-               if (msg[2] != 0)
-                       dev_warn(smi_info->dev, "Could not enable interrupts"
-                                ", failed set, using polled mode.\n");
-               else
+               if (msg[2] != 0) {
+                       dev_warn(smi_info->dev,
+                                "Couldn't set irq info: %x.\n", msg[2]);
+                       dev_warn(smi_info->dev,
+                                "Maybe ok, but ipmi might run very slowly.\n");
+               } else
                        smi_info->interrupt_disabled = 0;
                smi_info->si_state = SI_NORMAL;
                break;
index dafd9ac6428f2de6e4c2120c675337307d8bd9d6..0913d79424d3a3e81930a4c138979a621de18605 100644 (file)
@@ -622,9 +622,12 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
                                return -EFAULT;
                        break;
                case LPGETSTATUS:
+                       if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+                               return -EINTR;
                        lp_claim_parport_or_block (&lp_table[minor]);
                        status = r_str(minor);
                        lp_release_parport (&lp_table[minor]);
+                       mutex_unlock(&lp_table[minor].port_mutex);
 
                        if (copy_to_user(argp, &status, sizeof(int)))
                                return -EFAULT;
index cd9a6211dcadd61243905335a87d1e75bd91940e..35487e8ded59f106e3a7b157aa3d5bf46f9579e8 100644 (file)
@@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
        if (r->entropy_count / 8 < min + reserved) {
                nbytes = 0;
        } else {
+               int entropy_count, orig;
+retry:
+               entropy_count = orig = ACCESS_ONCE(r->entropy_count);
                /* If limited, never pull more than available */
-               if (r->limit && nbytes + reserved >= r->entropy_count / 8)
-                       nbytes = r->entropy_count/8 - reserved;
-
-               if (r->entropy_count / 8 >= nbytes + reserved)
-                       r->entropy_count -= nbytes*8;
-               else
-                       r->entropy_count = reserved;
+               if (r->limit && nbytes + reserved >= entropy_count / 8)
+                       nbytes = entropy_count/8 - reserved;
+
+               if (entropy_count / 8 >= nbytes + reserved) {
+                       entropy_count -= nbytes*8;
+                       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+                               goto retry;
+               } else {
+                       entropy_count = reserved;
+                       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+                               goto retry;
+               }
 
-               if (r->entropy_count < random_write_wakeup_thresh)
+               if (entropy_count < random_write_wakeup_thresh)
                        wakeup_write = 1;
        }
 
@@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 {
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];
+       unsigned long flags;
 
        /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
-       if (fips_enabled && !r->last_data_init)
-               nbytes += EXTRACT_SIZE;
+       if (fips_enabled) {
+               spin_lock_irqsave(&r->lock, flags);
+               if (!r->last_data_init) {
+                       r->last_data_init = true;
+                       spin_unlock_irqrestore(&r->lock, flags);
+                       trace_extract_entropy(r->name, EXTRACT_SIZE,
+                                             r->entropy_count, _RET_IP_);
+                       xfer_secondary_pool(r, EXTRACT_SIZE);
+                       extract_buf(r, tmp);
+                       spin_lock_irqsave(&r->lock, flags);
+                       memcpy(r->last_data, tmp, EXTRACT_SIZE);
+               }
+               spin_unlock_irqrestore(&r->lock, flags);
+       }
 
        trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
        xfer_secondary_pool(r, nbytes);
@@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                extract_buf(r, tmp);
 
                if (fips_enabled) {
-                       unsigned long flags;
-
-
-                       /* prime last_data value if need be, per fips 140-2 */
-                       if (!r->last_data_init) {
-                               spin_lock_irqsave(&r->lock, flags);
-                               memcpy(r->last_data, tmp, EXTRACT_SIZE);
-                               r->last_data_init = true;
-                               nbytes -= EXTRACT_SIZE;
-                               spin_unlock_irqrestore(&r->lock, flags);
-                               extract_buf(r, tmp);
-                       }
-
                        spin_lock_irqsave(&r->lock, flags);
                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                panic("Hardware RNG duplicated output!\n");
index 4945bd3d18d0e9de294ba2f1fb8c996662820a39..d5d2e4a985aa8602ff75f840f6af7e963f68471d 100644 (file)
@@ -179,7 +179,6 @@ static int __init ttyprintk_init(void)
 {
        int ret = -ENOMEM;
 
-       tpk_port.port.ops = &null_ops;
        mutex_init(&tpk_port.port_write_mutex);
 
        ttyprintk_driver = tty_alloc_driver(1,
@@ -190,6 +189,7 @@ static int __init ttyprintk_init(void)
                return PTR_ERR(ttyprintk_driver);
 
        tty_port_init(&tpk_port.port);
+       tpk_port.port.ops = &null_ops;
 
        ttyprintk_driver->driver_name = "ttyprintk";
        ttyprintk_driver->name = "ttyprintk";
index 892728412e9ddb0af46ec0a7f93cdeceadc4ed40..24f553673b72d0502415cae85dcfd8a281de06ec 100644 (file)
@@ -932,7 +932,7 @@ static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw,
        unsigned char reg;
        unsigned char rdiv;
 
-       if (hwdata->num > 5)
+       if (hwdata->num <= 5)
                reg = si5351_msynth_params_address(hwdata->num) + 2;
        else
                reg = SI5351_CLK6_7_OUTPUT_DIVIDER;
@@ -1477,6 +1477,16 @@ static int si5351_i2c_probe(struct i2c_client *client,
                        return -EINVAL;
                }
                drvdata->onecell.clks[n] = clk;
+
+               /* set initial clkout rate */
+               if (pdata->clkout[n].rate != 0) {
+                       int ret;
+                       ret = clk_set_rate(clk, pdata->clkout[n].rate);
+                       if (ret != 0) {
+                               dev_err(&client->dev, "Cannot set rate : %d\n",
+                                       ret);
+                       }
+               }
        }
 
        ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
index debf688afa8e503c4dba690b5cfffe983c7abae0..553ac35bcc912ea9f975927f8f91ff1e680dec6d 100644 (file)
@@ -183,7 +183,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
        writel(divisor, cdev->div_reg);
        vt8500_pmc_wait_busy();
 
-       spin_lock_irqsave(cdev->lock, flags);
+       spin_unlock_irqrestore(cdev->lock, flags);
 
        return 0;
 }
index d0e5eed146de69baf6baf1295f12cadce8d4a923..4faf0afc44cd5a2ebe0761af3e8d1250ed6c2b77 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk/mxs.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/init.h>
index d0940e69d034ffbaebc53a36cfe6606035304a16..3c1f88868f295e9df2e2f367ee91ebbec93db981 100644 (file)
@@ -791,7 +791,8 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
        GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
        GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
        GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
-       GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+       GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
+                       CLK_IGNORE_UNUSED, 0),
        GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
        GATE(smmu_rotator, "smmu_rotator", "aclk200",
                        E4210_GATE_IP_IMAGE, 4, 0, 0),
@@ -819,7 +820,8 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
        GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
        GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
-       GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0),
+       GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+                       CLK_IGNORE_UNUSED, 0),
        GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
        GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
                        SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
index 8292a00c3de9f1dbae8b19e23b6ac45b3d275866..075db0c99edb9b8569556d853fbaee0e88c2cb2c 100644 (file)
@@ -872,6 +872,14 @@ static void __init tegra20_periph_clk_init(void)
        struct clk *clk;
        int i;
 
+       /* ac97 */
+       clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0",
+                                   TEGRA_PERIPH_ON_APB,
+                                   clk_base, 0, 3, &periph_l_regs,
+                                   periph_clk_enb_refcnt);
+       clk_register_clkdev(clk, NULL, "tegra20-ac97");
+       clks[ac97] = clk;
+
        /* apbdma */
        clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
                                    0, 34, &periph_h_regs,
@@ -1234,9 +1242,6 @@ static __initdata struct tegra_clk_init_table init_table[] = {
        {uartc, pll_p, 0, 0},
        {uartd, pll_p, 0, 0},
        {uarte, pll_p, 0, 0},
-       {usbd, clk_max, 12000000, 0},
-       {usb2, clk_max, 12000000, 0},
-       {usb3, clk_max, 12000000, 0},
        {pll_a, clk_max, 56448000, 1},
        {pll_a_out0, clk_max, 11289600, 1},
        {cdev1, clk_max, 0, 1},
index bc7e9bde792b9ea5061752cd368a5a9e87fcbd9c..e364c9d4aa60e38b05085926297d3514da319048 100644 (file)
@@ -145,7 +145,13 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
                return ERR_PTR(-ENOMEM);
        }
 
-       for (i = 0; i < num_parents; i++) {
+       /* set main clock registers */
+       clk->reg_sel[0] = reg_sel[0];
+       clk->reg_bits[0] = reg_bits[0];
+       clk->reg_mask[0] = reg_mask[0];
+
+       /* handle clocks with more than one parent */
+       for (i = 1; i < num_parents; i++) {
                clk->reg_sel[i] = reg_sel[i];
                clk->reg_bits[i] = reg_bits[i];
                clk->reg_mask[i] = reg_mask[i];
index 0b4f35a5ffc273b45cc602f1a4f819d18cd43b86..80069c370a47a1c560f05929ce7bd2aedb651514 100644 (file)
@@ -325,7 +325,7 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
        clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
                                BIT(0), 0);
        clk_register_clkdev(clk, "fsmc", NULL);
-       clk_register_clkdev(clk, NULL, "smsc911x");
+       clk_register_clkdev(clk, NULL, "smsc911x.0");
 
        clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
                                BIT(1), 0);
index 5cf4f4686406e639e71353984ea7b84d0bc2709b..4f45eee9e33b2746f95c42867c2339f67707fb30 100644 (file)
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/platform_data/clk-lpss.h>
 #include <linux/platform_device.h>
 
 #define PRV_CLOCK_PARAMS 0x800
 
 static int lpt_clk_probe(struct platform_device *pdev)
 {
+       struct lpss_clk_data *drvdata;
        struct clk *clk;
 
+       drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
        /* LPSS free running clock */
-       clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
-                                     100000000);
+       drvdata->name = "lpss_clk";
+       clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL,
+                                     CLK_IS_ROOT, 100000000);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
-       /* Shared DMA clock */
-       clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
+       drvdata->clk = clk;
+       platform_set_drvdata(pdev, drvdata);
        return 0;
 }
 
index a1488f58f6ca40eb0b660a82e97a54ce0246a430..534fcb8251538a31d2695313b1d565990f8d51d4 100644 (file)
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
 
 choice
        prompt "Default CPUFreq governor"
-       default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
+       default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
        default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
        help
          This option sets which CPUFreq governor shall be loaded at
index f3af18b9acc50f299b7f105b5e0b59effe5f4f33..6e57543fe0b981e55cfdd9623c5eb1ed910ffd8c 100644 (file)
@@ -3,16 +3,17 @@
 #
 
 config ARM_BIG_LITTLE_CPUFREQ
-       tristate
-       depends on ARM_CPU_TOPOLOGY
+       tristate "Generic ARM big LITTLE CPUfreq driver"
+       depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+       help
+         This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
 config ARM_DT_BL_CPUFREQ
-       tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
-       select ARM_BIG_LITTLE_CPUFREQ
-       depends on OF && HAVE_CLK
+       tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+       depends on ARM_BIG_LITTLE_CPUFREQ && OF
        help
-         This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
-         This gets frequency tables from DT.
+         This enables probing via DT for Generic CPUfreq driver for ARM
+         big.LITTLE platform. This gets frequency tables from DT.
 
 config ARM_EXYNOS_CPUFREQ
        bool "SAMSUNG EXYNOS SoCs"
index 2b8a8c3745486d4cdf62c79490ad263815bf6032..6bd63d63d356ba0f2fc731e65824409cee9bf158 100644 (file)
@@ -272,7 +272,7 @@ config X86_LONGHAUL
 config X86_E_POWERSAVER
        tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
        select CPU_FREQ_TABLE
-       depends on X86_32
+       depends on X86_32 && ACPI_PROCESSOR
        help
          This adds the CPUFreq driver for VIA C7 processors.  However, this driver
          does not have any safeguards to prevent operating the CPU out of spec
index 11b8b4b54ceb5c85cfe81cba6d2064e6f45077f6..edc089e9d0c42028aa93a208dde52dea9224df53 100644 (file)
@@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask)
        switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-               cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+               cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                break;
        case SYSTEM_AMD_MSR_CAPABLE:
                cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-               cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+               cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
index dbdf677d2f3610e4e28340a3785090b245d5c336..5d7f53fcd6f5eac052d4a0bfed4d48f531921d8c 100644 (file)
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
 static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
 static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
 
-static int cpu_to_cluster(int cpu)
-{
-       return topology_physical_package_id(cpu);
-}
-
 static unsigned int bL_cpufreq_get(unsigned int cpu)
 {
        u32 cur_cluster = cpu_to_cluster(cpu);
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 
        cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-       dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+       dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
        return 0;
 }
 
index 70f18fc12d4ad2a1a2c14f00ae1844e5378cdaee..79b2ce17884dd9cc645cb5a34c33e90290bba10a 100644 (file)
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
        int (*init_opp_table)(struct device *cpu_dev);
 };
 
+static inline int cpu_to_cluster(int cpu)
+{
+       return topology_physical_package_id(cpu);
+}
+
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
 void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
 
index 44be3115375ca968897b16726a3f9192ab0e1695..fd9e3ea6a480aad30c0f9c593eb683470605e36d 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/opp.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include "arm_big_little.h"
 
-static int dt_init_opp_table(struct device *cpu_dev)
+/* get cpu node with valid operating-points */
+static struct device_node *get_cpu_node_with_valid_op(int cpu)
 {
-       struct device_node *np, *parent;
-       int count = 0, ret;
+       struct device_node *np = NULL, *parent;
+       int count = 0;
 
        parent = of_find_node_by_path("/cpus");
        if (!parent) {
                pr_err("failed to find OF /cpus\n");
-               return -ENOENT;
+               return NULL;
        }
 
        for_each_child_of_node(parent, np) {
-               if (count++ != cpu_dev->id)
+               if (count++ != cpu)
                        continue;
                if (!of_get_property(np, "operating-points", NULL)) {
-                       ret = -ENODATA;
-               } else {
-                       cpu_dev->of_node = np;
-                       ret = of_init_opp_table(cpu_dev);
+                       of_node_put(np);
+                       np = NULL;
                }
-               of_node_put(np);
-               of_node_put(parent);
 
-               return ret;
+               break;
        }
 
-       return -ENODEV;
+       of_node_put(parent);
+       return np;
 }
 
-static int dt_get_transition_latency(struct device *cpu_dev)
+static int dt_init_opp_table(struct device *cpu_dev)
 {
-       struct device_node *np, *parent;
-       u32 transition_latency = CPUFREQ_ETERNAL;
-       int count = 0;
+       struct device_node *np;
+       int ret;
 
-       parent = of_find_node_by_path("/cpus");
-       if (!parent) {
-               pr_err("failed to find OF /cpus\n");
-               return -ENOENT;
-       }
+       np = get_cpu_node_with_valid_op(cpu_dev->id);
+       if (!np)
+               return -ENODATA;
 
-       for_each_child_of_node(parent, np) {
-               if (count++ != cpu_dev->id)
-                       continue;
+       cpu_dev->of_node = np;
+       ret = of_init_opp_table(cpu_dev);
+       of_node_put(np);
 
-               of_property_read_u32(np, "clock-latency", &transition_latency);
-               of_node_put(np);
-               of_node_put(parent);
+       return ret;
+}
 
-               return 0;
-       }
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+       struct device_node *np;
+       u32 transition_latency = CPUFREQ_ETERNAL;
+
+       np = get_cpu_node_with_valid_op(cpu_dev->id);
+       if (!np)
+               return CPUFREQ_ETERNAL;
 
-       return -ENODEV;
+       of_property_read_u32(np, "clock-latency", &transition_latency);
+       of_node_put(np);
+
+       pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
+       return transition_latency;
 }
 
 static struct cpufreq_arm_bL_ops dt_bL_ops = {
@@ -90,17 +96,33 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
        .init_opp_table = dt_init_opp_table,
 };
 
-static int generic_bL_init(void)
+static int generic_bL_probe(struct platform_device *pdev)
 {
+       struct device_node *np;
+
+       np = get_cpu_node_with_valid_op(0);
+       if (!np)
+               return -ENODEV;
+
+       of_node_put(np);
        return bL_cpufreq_register(&dt_bL_ops);
 }
-module_init(generic_bL_init);
 
-static void generic_bL_exit(void)
+static int generic_bL_remove(struct platform_device *pdev)
 {
-       return bL_cpufreq_unregister(&dt_bL_ops);
+       bL_cpufreq_unregister(&dt_bL_ops);
+       return 0;
 }
-module_exit(generic_bL_exit);
+
+static struct platform_driver generic_bL_platdrv = {
+       .driver = {
+               .name   = "arm-bL-cpufreq-dt",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = generic_bL_probe,
+       .remove         = generic_bL_remove,
+};
+module_platform_driver(generic_bL_platdrv);
 
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
 MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
index 3ab8294eab04f280f04ce3c2ea42b0fb2722fc86..ad1fde277661e617fd4f19e01f34cd963f1349f4 100644 (file)
@@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
        struct cpufreq_freqs freqs;
        struct opp *opp;
        unsigned long volt = 0, volt_old = 0, tol = 0;
-       long freq_Hz;
+       long freq_Hz, freq_exact;
        unsigned int index;
        int ret;
 
@@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
        freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
        if (freq_Hz < 0)
                freq_Hz = freq_table[index].frequency * 1000;
+       freq_exact = freq_Hz;
        freqs.new = freq_Hz / 1000;
        freqs.old = clk_get_rate(cpu_clk) / 1000;
 
@@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
                }
        }
 
-       ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+       ret = clk_set_rate(cpu_clk, freq_exact);
        if (ret) {
                pr_err("failed to set clock rate: %d\n", ret);
                if (cpu_reg)
@@ -189,12 +190,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
        if (!np) {
                pr_err("failed to find cpu0 node\n");
-               return -ENOENT;
+               ret = -ENOENT;
+               goto out_put_parent;
        }
 
        cpu_dev = &pdev->dev;
        cpu_dev->of_node = np;
 
+       cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+       if (IS_ERR(cpu_reg)) {
+               /*
+                * If cpu0 regulator supply node is present, but regulator is
+                * not yet registered, we should try defering probe.
+                * not yet registered, we should try deferring probe.
+               if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+                       dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+                       ret = -EPROBE_DEFER;
+                       goto out_put_node;
+               }
+               pr_warn("failed to get cpu0 regulator: %ld\n",
+                       PTR_ERR(cpu_reg));
+               cpu_reg = NULL;
+       }
+
        cpu_clk = devm_clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
@@ -202,12 +220,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_node;
        }
 
-       cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
-       if (IS_ERR(cpu_reg)) {
-               pr_warn("failed to get cpu0 regulator\n");
-               cpu_reg = NULL;
-       }
-
        ret = of_init_opp_table(cpu_dev);
        if (ret) {
                pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +276,8 @@ out_free_table:
        opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
        of_node_put(np);
+out_put_parent:
+       of_node_put(parent);
        return ret;
 }
 
index 1b8a48eaf90f1c4d2e541e9e03e58f03ca8b6d80..2d53f47d1747360b8968c7c308c0f975777ef9ab 100644 (file)
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
                                __func__, cpu_dev->id, cpu);
        }
 
+       if ((cpus == 1) && (cpufreq_driver->target))
+               __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
        pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
        cpufreq_cpu_put(data);
 
        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
-               if (cpufreq_driver->target)
-                       __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
                lock_policy_rwsem_read(cpu);
                kobj = &data->kobj;
                cmp = &data->kobj_unregister;
@@ -1729,18 +1729,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
                        /* end old governor */
                        if (data->governor) {
                                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+                               unlock_policy_rwsem_write(policy->cpu);
                                __cpufreq_governor(data,
                                                CPUFREQ_GOV_POLICY_EXIT);
+                               lock_policy_rwsem_write(policy->cpu);
                        }
 
                        /* start new governor */
                        data->governor = policy->governor;
                        if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
-                               if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+                               if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
                                        failed = 0;
-                               else
+                               } else {
+                                       unlock_policy_rwsem_write(policy->cpu);
                                        __cpufreq_governor(data,
                                                        CPUFREQ_GOV_POLICY_EXIT);
+                                       lock_policy_rwsem_write(policy->cpu);
+                               }
                        }
 
                        if (failed) {
@@ -1832,15 +1837,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
        if (dev) {
                switch (action) {
                case CPU_ONLINE:
-               case CPU_ONLINE_FROZEN:
                        cpufreq_add_dev(dev, NULL);
                        break;
                case CPU_DOWN_PREPARE:
-               case CPU_DOWN_PREPARE_FROZEN:
+               case CPU_UP_CANCELED_FROZEN:
                        __cpufreq_remove_dev(dev, NULL);
                        break;
                case CPU_DOWN_FAILED:
-               case CPU_DOWN_FAILED_FROZEN:
                        cpufreq_add_dev(dev, NULL);
                        break;
                }
index 443442df113be73b33173c1a44109b4619508d7a..dc9b72e25c1ab66c429fb65e9cb1d410ef4ee908 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/tick.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/cpu.h>
 
 #include "cpufreq_governor.h"
 
@@ -180,8 +181,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
        if (!all_cpus) {
                __gov_queue_work(smp_processor_id(), dbs_data, delay);
        } else {
+               get_online_cpus();
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
+               put_online_cpus();
        }
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -255,6 +258,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if (have_governor_per_policy()) {
                        WARN_ON(dbs_data);
                } else if (dbs_data) {
+                       dbs_data->usage_count++;
                        policy->governor_data = dbs_data;
                        return 0;
                }
@@ -266,6 +270,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                }
 
                dbs_data->cdata = cdata;
+               dbs_data->usage_count = 1;
                rc = cdata->init(dbs_data);
                if (rc) {
                        pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -294,7 +299,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));
 
-               if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+               if ((cdata->governor == GOV_CONSERVATIVE) &&
+                               (!policy->governor->initialized)) {
                        struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                        cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +312,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                return 0;
        case CPUFREQ_GOV_POLICY_EXIT:
-               if ((policy->governor->initialized == 1) ||
-                               have_governor_per_policy()) {
+               if (!--dbs_data->usage_count) {
                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr(dbs_data));
 
-                       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+                       if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+                               (policy->governor->initialized == 1)) {
                                struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                                cpufreq_unregister_notifier(cs_ops->notifier_block,
index 8ac33538d0bdb4f66cc7572f53db4c463190ae92..e16a96130cb3491d30f376728144cdca92e441cd 100644 (file)
@@ -211,6 +211,7 @@ struct common_dbs_data {
 struct dbs_data {
        struct common_dbs_data *cdata;
        unsigned int min_sampling_rate;
+       int usage_count;
        void *tuners;
 
        /* dbs_mutex protects dbs_enable in governor start/stop */
index b0ffef96bf77a2c1bcaf0680c6a500a7430a6858..4b9bb5def6f159a126e4f05aa6a76d365c6a23f8 100644 (file)
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data)
        tuners->io_is_busy = should_io_be_busy();
 
        dbs_data->tuners = tuners;
-       pr_info("%s: tuners %p\n", __func__, tuners);
        mutex_init(&dbs_data->mutex);
        return 0;
 }
index bfd6273fd873531d864f3b4e104d37018a5d8dda..fb65decffa28128ded8817441b2e5eff8ecad8e3 100644 (file)
@@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
                cpufreq_update_policy(cpu);
                break;
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                cpufreq_stats_free_sysfs(cpu);
                break;
        case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
+               cpufreq_stats_free_table(cpu);
+               break;
+       case CPU_UP_CANCELED_FROZEN:
+               cpufreq_stats_free_sysfs(cpu);
                cpufreq_stats_free_table(cpu);
                break;
        }
index cc3a8e6c92beca8669e1e45e48051d9a9d26a81d..07f2840ad80596b529488f40808e10c70782f498 100644 (file)
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-       ktime_t start_time;
-       ktime_t end_time;
        int core_pct_busy;
-       int pstate_pct_busy;
-       u64 duration_us;
-       u64 idletime_us;
        u64 aperf;
        u64 mperf;
        int freq;
@@ -86,13 +81,9 @@ struct cpudata {
        struct pstate_adjust_policy *pstate_policy;
        struct pstate_data pstate;
        struct _pid pid;
-       struct _pid idle_pid;
 
        int min_pstate_count;
-       int idle_mode;
 
-       ktime_t prev_sample;
-       u64     prev_idle_time_us;
        u64     prev_aperf;
        u64     prev_mperf;
        int     sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
        int min_perf_pct;
        int32_t max_perf;
        int32_t min_perf;
+       int max_policy_pct;
+       int max_sysfs_pct;
 };
 
 static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
+       .max_policy_pct = 100,
+       .max_sysfs_pct = 100,
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
                0);
 }
 
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
-       pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
-       pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
-       pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
-       pid_reset(&cpu->idle_pid,
-               75,
-               50,
-               cpu->pstate_policy->deadband,
-               0);
-}
-
 static inline void intel_pstate_reset_all_pid(void)
 {
        unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
 
-       limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+       limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+       limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
        return count;
 }
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
        if (pstate == cpu->pstate.current_pstate)
                return;
 
-#ifndef MODULE
        trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
        cpu->pstate.current_pstate = pstate;
        wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
 
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
        u64 core_pct;
-       sample->pstate_pct_busy = 100 - div64_u64(
-                                       sample->idletime_us * 100,
-                                       sample->duration_us);
        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
        sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
-       sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
-                                       100);
+       sample->core_pct_busy = core_pct;
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
-       ktime_t now;
-       u64 idle_time_us;
        u64 aperf, mperf;
 
-       now = ktime_get();
-       idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
-       /* for the first sample, don't actually record a sample, just
-        * set the baseline */
-       if (cpu->prev_idle_time_us > 0) {
-               cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-               cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
-               cpu->samples[cpu->sample_ptr].end_time = now;
-               cpu->samples[cpu->sample_ptr].duration_us =
-                       ktime_us_delta(now, cpu->prev_sample);
-               cpu->samples[cpu->sample_ptr].idletime_us =
-                       idle_time_us - cpu->prev_idle_time_us;
-
-               cpu->samples[cpu->sample_ptr].aperf = aperf;
-               cpu->samples[cpu->sample_ptr].mperf = mperf;
-               cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-               cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
-               intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
-       }
+       cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+       cpu->samples[cpu->sample_ptr].aperf = aperf;
+       cpu->samples[cpu->sample_ptr].mperf = mperf;
+       cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+       cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+       intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
-       cpu->prev_sample = now;
-       cpu->prev_idle_time_us = idle_time_us;
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
 }
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
        mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
-       cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
-       cpu->idle_mode = 0;
-}
-
 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
        int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
                intel_pstate_pstate_decrease(cpu, steps);
 }
 
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
-       int busy_scaled;
-       struct _pid *pid;
-       int ctl = 0;
-       int steps;
-
-       pid = &cpu->idle_pid;
-
-       busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
-       ctl = pid_calc(pid, 100 - busy_scaled);
-
-       steps = abs(ctl);
-       if (ctl < 0)
-               intel_pstate_pstate_decrease(cpu, steps);
-       else
-               intel_pstate_pstate_increase(cpu, steps);
-
-       if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
-               intel_pstate_normal_mode(cpu);
-}
-
 static void intel_pstate_timer_func(unsigned long __data)
 {
        struct cpudata *cpu = (struct cpudata *) __data;
 
        intel_pstate_sample(cpu);
+       intel_pstate_adjust_busy_pstate(cpu);
 
-       if (!cpu->idle_mode)
-               intel_pstate_adjust_busy_pstate(cpu);
-       else
-               intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
        if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
                cpu->min_pstate_count++;
                if (!(cpu->min_pstate_count % 5)) {
                        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
-                       intel_pstate_idle_mode(cpu);
                }
        } else
                cpu->min_pstate_count = 0;
-#endif
+
        intel_pstate_set_sample_time(cpu);
 }
 
@@ -600,6 +521,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, default_policy),
        ICPU(0x2d, default_policy),
+       ICPU(0x3a, default_policy),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -631,7 +553,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
                (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
-       intel_pstate_idle_pid_reset(cpu);
        intel_pstate_sample(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
@@ -675,8 +596,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
-       limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
-       limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+       limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+       limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+       limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
        return 0;
@@ -788,10 +710,9 @@ static int __init intel_pstate_init(void)
 
        pr_info("Intel P-state driver initializing.\n");
 
-       all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+       all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;
-       memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
 
        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
index d36ea8dc96eb9fdc0812e0e93512f8f97cda66f8..b2644af985ec8e741daf009262069694c671513b 100644 (file)
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
        priv.dev = &pdev->dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Cannot get memory resource\n");
-               return -ENODEV;
-       }
        priv.base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv.base))
                return PTR_ERR(priv.base);
index 84889573b5669f22e1632108c5f32b94285d5814..d53912768946b96c1c27b017dbaeb7a10e7c2feb 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/clock.h>
+#include <asm/idle.h>
 
 #include <asm/mach-loongson/loongson.h>
 
@@ -200,6 +201,7 @@ static void loongson2_cpu_wait(void)
        LOONGSON_CHIPCFG0 &= ~0x7;      /* Put CPU into wait mode */
        LOONGSON_CHIPCFG0 = cpu_freq;   /* Restore CPU state */
        spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+       local_irq_enable();
 }
 
 static int __init cpufreq_init(void)
index 765fdf5ce579bafa81eb79e67f8341f424e22fe3..bf416a8391a77ec94dc3686d773e5afb7b437b3d 100644 (file)
@@ -1154,7 +1154,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-                                DMA_BIDIRECTIONAL, assoc_chained);
+                                DMA_TO_DEVICE, assoc_chained);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
@@ -1336,7 +1336,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-                                DMA_BIDIRECTIONAL, assoc_chained);
+                                DMA_TO_DEVICE, assoc_chained);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
index a76d4c4f29f50798ed3ecafaa871d98a7d02c7e3..35d483f8db66b2a3ecb204181ffd0d43b0aaaffe 100644 (file)
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
        .cra_blocksize   = AES_BLOCK_SIZE,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_blkcipher_type,
+       .cra_alignmask   = 0xf,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_cbc_init,
        .cra_exit        = nx_crypto_ctx_exit,
index ba5f1611336fe9fa5165cfa680e7acc2b7e38637..7bbc9a81da219e5c27e021c9ef6c880d194c90d3 100644 (file)
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize   = AES_BLOCK_SIZE,
+       .cra_alignmask   = 0xf,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_blkcipher_type,
        .cra_module      = THIS_MODULE,
index c8109edc5cfb02063c1bd6c5914985846bccac2a..6cca6c392b00f34fa65ad4663b1725bd7b3842fe 100644 (file)
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
-               nbytes -= AES_BLOCK_SIZE;
+               nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 
        csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 
index 9767315f8c0bd069a451f1a2c74bbf2f2bff628d..67024f2f0b78746bdbfcb8c5ffde595754a0b155 100644 (file)
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
         *  1: <= SHA256_BLOCK_SIZE: copy into state, return 0
         *  2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+       if (len + sctx->count < SHA256_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count, data, len);
                sctx->count += len;
                goto out;
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        atomic_inc(&(nx_ctx->stats->sha256_ops));
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buf, data + len - leftover, leftover);
+       if (leftover)
+               memcpy(sctx->buf, data + len - leftover, leftover);
        sctx->count = leftover;
 
        csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        struct nx_sg *in_sg, *out_sg;
        int rc;
 
+
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
        atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-       atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+       atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
                     &(nx_ctx->stats->sha256_bytes));
        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
index 3177b8c3d5f1e3073ef559d902fe3cd488e8360b..08eee11223490c7a1a249a2f559f960e72e7af2c 100644 (file)
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
         *  1: <= SHA512_BLOCK_SIZE: copy into state, return 0
         *  2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+       if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count[0], data, len);
                sctx->count[0] += len;
                goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        atomic_inc(&(nx_ctx->stats->sha512_ops));
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buf, data + len - leftover, leftover);
+       if (leftover)
+               memcpy(sctx->buf, data + len - leftover, leftover);
        sctx->count[0] = leftover;
 
        spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
                goto out;
 
        atomic_inc(&(nx_ctx->stats->sha512_ops));
-       atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+       atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
                     &(nx_ctx->stats->sha512_bytes));
 
        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
index c767f232e6933bc876adedd56a1f7e655f93c815..bbdab6e5ccf08f75fcc23481dbb9d29a1df6cd69 100644 (file)
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
 {
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
-       struct blkcipher_walk walk;
-       int rc;
-
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-       if (rc)
-               goto out;
 
        if (iv)
-               memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+               memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-       while (walk.nbytes) {
-               nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-                                          walk.nbytes, nx_ctx->ap->sglen);
-               nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-                                           walk.nbytes, nx_ctx->ap->sglen);
-
-               rc = blkcipher_walk_done(desc, &walk, 0);
-               if (rc)
-                       break;
-       }
-
-       if (walk.nbytes) {
-               nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-                                          walk.nbytes, nx_ctx->ap->sglen);
-               nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-                                           walk.nbytes, nx_ctx->ap->sglen);
-
-               rc = 0;
-       }
+       nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+       nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
 
        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
        nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
        if (rc)
                goto out;
 
+       nx_driver.of.status = NX_OKAY;
+
        rc = crypto_register_alg(&nx_ecb_aes_alg);
        if (rc)
                goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
        if (rc)
                goto out_unreg_s512;
 
-       nx_driver.of.status = NX_OKAY;
-
        goto out;
 
 out_unreg_s512:
index a97bb6c1596c0cd7ffee419d12bf8b04b158359b..c3dc1c04a5df6f39966b3ca4a9c9874c001035ec 100644 (file)
@@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
 };
-MODULE_DEVICE_TABLE(platform, sahara_dt_ids);
+MODULE_DEVICE_TABLE(of, sahara_dt_ids);
 
 static int sahara_probe(struct platform_device *pdev)
 {
index ba6fc62e965163bca45cd55d148188812e08374a..5a18f82f732af57a319628190713e6bd054cf8b3 100644 (file)
@@ -4,7 +4,8 @@
  * Based on of-dma.c
  *
  * Copyright (C) 2013, Intel Corporation
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *         Mika Westerberg <mika.westerberg@linux.intel.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/ioport.h>
 #include <linux/acpi.h>
 #include <linux/acpi_dma.h>
 
 static LIST_HEAD(acpi_dma_list);
 static DEFINE_MUTEX(acpi_dma_lock);
 
+/**
+ * acpi_dma_parse_resource_group - match device and parse resource group
+ * @grp:       CSRT resource group
+ * @adev:      ACPI device to match with
+ * @adma:      struct acpi_dma of the given DMA controller
+ *
+ * Returns 1 on success, 0 when no information is available, or appropriate
+ * errno value on error.
+ *
+ * In order to match a device from DSDT table to the corresponding CSRT device
+ * we use MMIO address and IRQ.
+ */
+static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
+               struct acpi_device *adev, struct acpi_dma *adma)
+{
+       const struct acpi_csrt_shared_info *si;
+       struct list_head resource_list;
+       struct resource_list_entry *rentry;
+       resource_size_t mem = 0, irq = 0;
+       u32 vendor_id;
+       int ret;
+
+       if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
+               return -ENODEV;
+
+       INIT_LIST_HEAD(&resource_list);
+       ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+       if (ret <= 0)
+               return 0;
+
+       list_for_each_entry(rentry, &resource_list, node) {
+               if (resource_type(&rentry->res) == IORESOURCE_MEM)
+                       mem = rentry->res.start;
+               else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
+                       irq = rentry->res.start;
+       }
+
+       acpi_dev_free_resource_list(&resource_list);
+
+       /* Consider initial zero values as resource not found */
+       if (mem == 0 && irq == 0)
+               return 0;
+
+       si = (const struct acpi_csrt_shared_info *)&grp[1];
+
+       /* Match device by MMIO and IRQ */
+       if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
+               return 0;
+
+       vendor_id = le32_to_cpu(grp->vendor_id);
+       dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
+               (char *)&vendor_id, grp->device_id, grp->revision);
+
+       /* Check if the request line range is available */
+       if (si->base_request_line == 0 && si->num_handshake_signals == 0)
+               return 0;
+
+       adma->base_request_line = si->base_request_line;
+       adma->end_request_line = si->base_request_line +
+                                si->num_handshake_signals - 1;
+
+       dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
+               adma->base_request_line, adma->end_request_line);
+
+       return 1;
+}
+
+/**
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
+ * @adev:      ACPI device to match with
+ * @adma:      struct acpi_dma of the given DMA controller
+ *
+ * CSRT or Core System Resources Table is a proprietary ACPI table
+ * introduced by Microsoft. This table can contain devices that are not in
+ * the system DSDT table. In particular DMA controllers might be described
+ * here.
+ *
+ * We are using this table to get the request line range of the specific DMA
+ * controller to be used later.
+ *
+ */
+static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
+{
+       struct acpi_csrt_group *grp, *end;
+       struct acpi_table_csrt *csrt;
+       acpi_status status;
+       int ret;
+
+       status = acpi_get_table(ACPI_SIG_CSRT, 0,
+                               (struct acpi_table_header **)&csrt);
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND)
+                       dev_warn(&adev->dev, "failed to get the CSRT table\n");
+               return;
+       }
+
+       grp = (struct acpi_csrt_group *)(csrt + 1);
+       end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
+
+       while (grp < end) {
+               ret = acpi_dma_parse_resource_group(grp, adev, adma);
+               if (ret < 0) {
+                       dev_warn(&adev->dev,
+                                "error in parsing resource group\n");
+                       return;
+               }
+
+               grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
+       }
+}
+
 /**
  * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
  * @dev:               struct device of DMA controller
@@ -61,6 +174,8 @@ int acpi_dma_controller_register(struct device *dev,
        adma->acpi_dma_xlate = acpi_dma_xlate;
        adma->data = data;
 
+       acpi_dma_parse_csrt(adev, adma);
+
        /* Now queue acpi_dma controller structure in list */
        mutex_lock(&acpi_dma_lock);
        list_add_tail(&adma->dma_controllers, &acpi_dma_list);
@@ -149,6 +264,45 @@ void devm_acpi_dma_controller_free(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
 
+/**
+ * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
+ * @adma:      struct acpi_dma of DMA controller
+ * @dma_spec:  dma specifier to update
+ *
+ * Returns 0 if no information is available, -1 on mismatch, and 1 otherwise.
+ *
+ * According to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
+ * Descriptor":
+ *     DMA Request Line bits is a platform-relative number uniquely
+ *     identifying the request line assigned. Request line-to-Controller
+ *     mapping is done in a controller-specific OS driver.
+ * That's why we can safely adjust slave_id when the appropriate controller is
+ * found.
+ */
+static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
+               struct acpi_dma_spec *dma_spec)
+{
+       /* Set link to the DMA controller device */
+       dma_spec->dev = adma->dev;
+
+       /* Check if the request line range is available */
+       if (adma->base_request_line == 0 && adma->end_request_line == 0)
+               return 0;
+
+       /* Check if slave_id falls to the range */
+       /* Check if slave_id falls into the range */
+           dma_spec->slave_id > adma->end_request_line)
+               return -1;
+
+       /*
+        * Here we adjust slave_id. It should be a relative number to the base
+        * request line.
+        */
+       dma_spec->slave_id -= adma->base_request_line;
+
+       return 1;
+}
+
 struct acpi_dma_parser_data {
        struct acpi_dma_spec dma_spec;
        size_t index;
@@ -193,6 +347,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
        struct acpi_device *adev;
        struct acpi_dma *adma;
        struct dma_chan *chan = NULL;
+       int found;
 
        /* Check if the device was enumerated by ACPI */
        if (!dev || !ACPI_HANDLE(dev))
@@ -219,9 +374,20 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
        mutex_lock(&acpi_dma_lock);
 
        list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
-               dma_spec->dev = adma->dev;
+               /*
+                * We are not going to call the translation function if slave_id
+                * doesn't fall into the request range.
+                */
+               found = acpi_dma_update_dma_spec(adma, dma_spec);
+               if (found < 0)
+                       continue;
                chan = adma->acpi_dma_xlate(dma_spec, adma);
-               if (chan)
+               /*
+                * Try to get a channel only from the DMA controller that
+                * matches the slave_id. See acpi_dma_update_dma_spec()
+                * description for the details.
+                */
+               if (found > 0 || chan)
                        break;
        }
 
index d8ce4ecfef18e079336654b8a53ea89fd613cb44..e88ded2c8d2f1bc8d73223ef2175fcbd391be5bf 100644 (file)
@@ -716,8 +716,7 @@ static int dmatest_func(void *data)
                }
                dma_async_issue_pending(chan);
 
-               wait_event_freezable_timeout(done_wait,
-                                            done.done || kthread_should_stop(),
+               wait_event_freezable_timeout(done_wait, done.done,
                                             msecs_to_jiffies(params->timeout));
 
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
@@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info)
 static int __restart_threaded_test(struct dmatest_info *info, bool run)
 {
        struct dmatest_params *params = &info->params;
-       int ret;
 
        /* Stop any running test first */
        __stop_threaded_test(info);
@@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
        memcpy(params, &info->dbgfs_params, sizeof(*params));
 
        /* Run test with new parameters */
-       ret = __run_threaded_test(info);
-       if (ret) {
-               __stop_threaded_test(info);
-               pr_err("dmatest: Can't run test\n");
+       return __run_threaded_test(info);
+}
+
+static bool __is_threaded_test_run(struct dmatest_info *info)
+{
+       struct dmatest_chan *dtc;
+
+       list_for_each_entry(dtc, &info->channels, node) {
+               struct dmatest_thread *thread;
+
+               list_for_each_entry(thread, &dtc->threads, node) {
+                       if (!thread->done)
+                               return true;
+               }
        }
 
-       return ret;
+       return false;
 }
 
 static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
@@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
 {
        struct dmatest_info *info = file->private_data;
        char buf[3];
-       struct dmatest_chan *dtc;
-       bool alive = false;
 
        mutex_lock(&info->lock);
-       list_for_each_entry(dtc, &info->channels, node) {
-               struct dmatest_thread *thread;
-
-               list_for_each_entry(thread, &dtc->threads, node) {
-                       if (!thread->done) {
-                               alive = true;
-                               break;
-                       }
-               }
-       }
 
-       if (alive) {
+       if (__is_threaded_test_run(info)) {
                buf[0] = 'Y';
        } else {
                __stop_threaded_test(info);
@@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
 
        if (strtobool(buf, &bv) == 0) {
                mutex_lock(&info->lock);
-               ret = __restart_threaded_test(info, bv);
+
+               if (__is_threaded_test_run(info))
+                       ret = -EBUSY;
+               else
+                       ret = __restart_threaded_test(info, bv);
+
                mutex_unlock(&info->lock);
        }
 
index 1734feec47b13bd7e7d1370003035416cb2e3e70..71bf4ec300ea5ca10958c0a43f71c9e9175b7879 100644 (file)
@@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c)
                        return;
                }
 
-               if (d40_queue_start(d40c) == NULL)
+               if (d40_queue_start(d40c) == NULL) {
                        d40c->busy = false;
-               pm_runtime_mark_last_busy(d40c->base->dev);
-               pm_runtime_put_autosuspend(d40c->base->dev);
+
+                       pm_runtime_mark_last_busy(d40c->base->dev);
+                       pm_runtime_put_autosuspend(d40c->base->dev);
+               }
 
                d40_desc_remove(d40d);
                d40_desc_done(d40c, d40d);
index ce193409ebd32345e997dad798625c41565b438e..33f59ecd256e1487ce3dca17cd5f0822e2308c34 100644 (file)
@@ -1273,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, tdma);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "No mem resource for DMA\n");
-               return -EINVAL;
-       }
-
        tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(tdma->base_addr))
                return PTR_ERR(tdma->base_addr);
index 8c171fa1cb9bc65f5531e178bd45cb9a0ef0c9ad..845f04786c2de4baedf0952d2ec2b5a830e62602 100644 (file)
@@ -202,9 +202,9 @@ static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
                   amd64_inject_word_show, amd64_inject_word_store);
 static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
                   amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(inject_write, S_IWUSR,
                   NULL, amd64_inject_write_store);
-static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(inject_read,  S_IWUSR,
                   NULL, amd64_inject_read_store);
 
 
index b623c599e572975fd30db879da54194ef025dcf7..8bd1bb6dbe4739cf0914a129a12363c1bf157820 100644 (file)
@@ -523,13 +523,11 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
        struct efivar_entry *entry;
        int err;
 
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry)
-               return;
-
        /* Add new sysfs entries */
        while (1) {
-               memset(entry, 0, sizeof(*entry));
+               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry)
+                       return;
 
                err = efivar_init(efivar_update_sysfs_entry, entry,
                                  true, false, &efivar_sysfs_list);
index 87d567089f13653297ddc001ec0fda8746254465..573c449c49b9138a69f8aaf9a999e8a41898d2e1 100644 (file)
@@ -636,7 +636,7 @@ config GPIO_MAX7301
 
 config GPIO_MCP23S08
        tristate "Microchip MCP23xxx I/O expander"
-       depends on SPI_MASTER || I2C
+       depends on (SPI_MASTER && !I2C) || I2C
        help
          SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
          I/O expanders.
index 634c3d37f7b5e010fe315e0734c4205ab4abce58..62ef10a641c4250b273263c78e51bd2534ed632a 100644 (file)
@@ -324,6 +324,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
        resource_size_t start, len;
        struct lnw_gpio *lnw;
        u32 gpio_base;
+       u32 irq_base;
        int retval;
        int ngpio = id->driver_data;
 
@@ -345,6 +346,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
                retval = -EFAULT;
                goto err_ioremap;
        }
+       irq_base = *(u32 *)base;
        gpio_base = *((u32 *)base + 1);
        /* release the IO mapping, since we already get the info from bar1 */
        iounmap(base);
@@ -365,13 +367,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
                goto err_ioremap;
        }
 
-       lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
-                                           &lnw_gpio_irq_ops, lnw);
-       if (!lnw->domain) {
-               retval = -ENOMEM;
-               goto err_ioremap;
-       }
-
        lnw->reg_base = base;
        lnw->chip.label = dev_name(&pdev->dev);
        lnw->chip.request = lnw_gpio_request;
@@ -384,6 +379,14 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
        lnw->chip.ngpio = ngpio;
        lnw->chip.can_sleep = 0;
        lnw->pdev = pdev;
+
+       lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
+                                           &lnw_gpio_irq_ops, lnw);
+       if (!lnw->domain) {
+               retval = -ENOMEM;
+               goto err_ioremap;
+       }
+
        pci_set_drvdata(pdev, lnw);
        retval = gpiochip_add(&lnw->chip);
        if (retval) {
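
The Langwell GPIO hunk above fetches irq_base from BAR1 and switches to irq_domain_add_simple(), which maps onto a known first IRQ number when one exists and otherwise behaves like a linear domain. Rough shape of the call, with a hypothetical my_irq_ops and priv structure (not from this driver):

    #include <linux/errno.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    struct my_chip {
            struct irq_domain *domain;
    };

    static const struct irq_domain_ops my_irq_ops;  /* hypothetical ops */

    static int my_setup_domain(struct device_node *np, struct my_chip *priv,
                               unsigned int ngpio, unsigned int irq_base)
    {
            /* Non-zero irq_base: reuse the pre-allocated descriptor range.
             * irq_base == 0 would fall back to a plain linear domain. */
            priv->domain = irq_domain_add_simple(np, ngpio, irq_base,
                                                 &my_irq_ops, priv);
            return priv->domain ? 0 : -ENOMEM;
    }
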
index b73366523faebf4db81c3fbb020365f8afd75123..0966f2637ad2dc849f12d7224dbf7d6af1c75c13 100644 (file)
@@ -496,8 +496,7 @@ err_irq_alloc_descs:
 err_gpiochip_add:
        while (--i >= 0) {
                chip--;
-               ret = gpiochip_remove(&chip->gpio);
-               if (ret)
+               if (gpiochip_remove(&chip->gpio))
                        dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
        }
        kfree(chip_save);
index bf69a7eff370cbabc72ba01591727ba20c81338c..3a4816adc137de912b51fca601c50d65f8baea19 100644 (file)
@@ -619,11 +619,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
         * per-CPU registers */
        if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               if (!res) {
-                       dev_err(&pdev->dev, "Cannot get memory resource\n");
-                       return -ENODEV;
-               }
-
                mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
                                                               res);
                if (IS_ERR(mvchip->percpu_membase))
index 25000b0f84532645ad7eab5d85278dfdd9b2557b..f8e6af20dfbf1d0efc5fce0060b6ace3792cb1ad 100644 (file)
@@ -326,7 +326,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
 
        err = bgpio_init(&port->bgc, &pdev->dev, 4,
                         port->base + PINCTRL_DIN(port),
-                        port->base + PINCTRL_DOUT(port), NULL,
+                        port->base + PINCTRL_DOUT(port) + MXS_SET,
+                        port->base + PINCTRL_DOUT(port) + MXS_CLR,
                         port->base + PINCTRL_DOE(port), NULL, 0);
        if (err)
                goto out_irqdesc_free;
index 2050891d9c65a4936533fb14a4b2d8b95b379cb5..d3f7d2db870f985253a603f3f53f2b5475ee97f6 100644 (file)
@@ -69,6 +69,7 @@ struct gpio_bank {
        bool is_mpuio;
        bool dbck_flag;
        bool loses_context;
+       bool context_valid;
        int stride;
        u32 width;
        int context_loss_count;
@@ -1128,6 +1129,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
                        bank->loses_context = true;
        } else {
                bank->loses_context = pdata->loses_context;
+
+               if (bank->loses_context)
+                       bank->get_context_loss_count =
+                               pdata->get_context_loss_count;
        }
 
 
@@ -1178,9 +1183,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
        omap_gpio_chip_init(bank);
        omap_gpio_show_rev(bank);
 
-       if (bank->loses_context)
-               bank->get_context_loss_count = pdata->get_context_loss_count;
-
        pm_runtime_put(bank->dev);
 
        list_add_tail(&bank->node, &omap_gpio_list);
@@ -1259,6 +1261,8 @@ update_gpio_context_count:
        return 0;
 }
 
+static void omap_gpio_init_context(struct gpio_bank *p);
+
 static int omap_gpio_runtime_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1268,6 +1272,20 @@ static int omap_gpio_runtime_resume(struct device *dev)
        int c;
 
        spin_lock_irqsave(&bank->lock, flags);
+
+       /*
+        * On the first resume during the probe, the context has not
+        * been initialised and so initialise it now. Also initialise
+        * the context loss count.
+        */
+       if (bank->loses_context && !bank->context_valid) {
+               omap_gpio_init_context(bank);
+
+               if (bank->get_context_loss_count)
+                       bank->context_loss_count =
+                               bank->get_context_loss_count(bank->dev);
+       }
+
        _gpio_dbck_enable(bank);
 
        /*
@@ -1384,6 +1402,29 @@ void omap2_gpio_resume_after_idle(void)
 }
 
 #if defined(CONFIG_PM_RUNTIME)
+static void omap_gpio_init_context(struct gpio_bank *p)
+{
+       struct omap_gpio_reg_offs *regs = p->regs;
+       void __iomem *base = p->base;
+
+       p->context.ctrl         = __raw_readl(base + regs->ctrl);
+       p->context.oe           = __raw_readl(base + regs->direction);
+       p->context.wake_en      = __raw_readl(base + regs->wkup_en);
+       p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
+       p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
+       p->context.risingdetect = __raw_readl(base + regs->risingdetect);
+       p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
+       p->context.irqenable1   = __raw_readl(base + regs->irqenable);
+       p->context.irqenable2   = __raw_readl(base + regs->irqenable2);
+
+       if (regs->set_dataout && p->regs->clr_dataout)
+               p->context.dataout = __raw_readl(base + regs->set_dataout);
+       else
+               p->context.dataout = __raw_readl(base + regs->dataout);
+
+       p->context_valid = true;
+}
+
 static void omap_gpio_restore_context(struct gpio_bank *bank)
 {
        __raw_writel(bank->context.wake_en,
@@ -1421,6 +1462,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
 #else
 #define omap_gpio_runtime_suspend NULL
 #define omap_gpio_runtime_resume NULL
+static void omap_gpio_init_context(struct gpio_bank *p) {}
 #endif
 
 static const struct dev_pm_ops gpio_pm_ops = {
index cdf599687cf7b1e4704fa213cf77961bb8f66cfd..0fec097e838df23c264b62405b2cf10bd0531bae 100644 (file)
@@ -424,8 +424,7 @@ end:
 err_request_irq:
        irq_free_descs(irq_base, gpio_pins[chip->ioh]);
 
-       ret = gpiochip_remove(&chip->gpio);
-       if (ret)
+       if (gpiochip_remove(&chip->gpio))
                dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
 
 err_gpiochip_add:
index 1e4de16ceb419e148c9c233aef0746143530efca..5af65719b95dafad605fb8f1f733ead623b78bf5 100644 (file)
@@ -272,10 +272,8 @@ static int sch_gpio_probe(struct platform_device *pdev)
        return 0;
 
 err_sch_gpio_resume:
-       err = gpiochip_remove(&sch_gpio_core);
-       if (err)
-               dev_err(&pdev->dev, "%s failed, %d\n",
-                               "gpiochip_remove()", err);
+       if (gpiochip_remove(&sch_gpio_core))
+               dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
 
 err_sch_gpio_core:
        release_region(res->start, resource_size(res));
index da4cb5b0cb87612fed241b9a4ebc6245716377cd..9a62672f1bed97185b26132966dd0ddcb50534f3 100644 (file)
@@ -463,11 +463,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Missing MEM resource\n");
-               return -ENODEV;
-       }
-
        regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);
index 095ab14cea4d16d672e009fb40d6929a2e5d16e0..5ac2919197fe70775f8d17fda5a6dafaab9af677 100644 (file)
@@ -446,7 +446,8 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
        return ret;
 
 err_gpiob:
-       ret = gpiochip_remove(&vb_gpio->gpioa);
+       if (gpiochip_remove(&vb_gpio->gpioa))
+               dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
 
 err_gpioa:
        return ret;
index 3a8f7e6db2950fc175759e558c7663ae84d3702f..e7e92429d10f9e36d80d48e5f8a0004e4eb0284e 100644 (file)
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
        struct drm_crtc *crtc;
 
+       /* Locking is currently fubar in the panic handler. */
+       if (oops_in_progress)
+               return;
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                WARN_ON(!mutex_is_locked(&crtc->mutex));
 
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
        else
                return "unknown";
 }
+EXPORT_SYMBOL(drm_get_connector_status_name);
 
 /**
  * drm_mode_object_get - allocate a new modeset identifier
index e974f9309b72697d3c9918ab979c8469d548c25d..ed1334e27c33283442fceb604892e55666bdfe2c 100644 (file)
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                connector->helper_private;
        int count = 0;
        int mode_flags = 0;
+       bool verbose_prune = true;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
                        drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
                        connector->base.id, drm_get_connector_name(connector));
                drm_mode_connector_update_edid_property(connector, NULL);
+               verbose_prune = false;
                goto prune;
        }
 
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        }
 
 prune:
-       drm_mode_prune_invalid(dev, &connector->modes, true);
+       drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
        if (list_empty(&connector->modes))
                return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
                        continue;
 
                connector->status = connector->funcs->detect(connector, false);
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-                             connector->base.id,
-                             drm_get_connector_name(connector),
-                             old_status, connector->status);
-               if (old_status != connector->status)
+               if (old_status != connector->status) {
+                       const char *old, *new;
+
+                       old = drm_get_connector_status_name(old_status);
+                       new = drm_get_connector_status_name(connector->status);
+
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+                                     "status updated from %s to %s\n",
+                                     connector->base.id,
+                                     drm_get_connector_name(connector),
+                                     old, new);
+
                        changed = true;
+               }
        }
 
        mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
                old_status = connector->status;
 
                connector->status = connector->funcs->detect(connector, false);
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                              connector->base.id,
                              drm_get_connector_name(connector),
-                             old_status, connector->status);
+                             drm_get_connector_status_name(old_status),
+                             drm_get_connector_status_name(connector->status));
                if (old_status != connector->status)
                        changed = true;
        }
index 8d4f29075af5bd24f0b03461cac4d35c511530dd..9cc247f555028f41046cbacd57a6b31cdf8f5e20 100644 (file)
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
-       const struct drm_ioctl_desc *ioctl;
+       const struct drm_ioctl_desc *ioctl = NULL;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;
 
-       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
-                 task_pid_nr(current), cmd, nr,
-                 (long)old_encode_dev(file_priv->minor->device),
-                 file_priv->authenticated);
-
        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
        } else
                goto err_i1;
 
+       DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+                 file_priv->authenticated, ioctl->name);
+
        /* Do not trust userspace, use our own definition */
        func = ioctl->func;
        /* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
        }
 
       err_i1:
+       if (!ioctl)
+               DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+                         task_pid_nr(current),
+                         (long)old_encode_dev(file_priv->minor->device),
+                         file_priv->authenticated, cmd, nr);
+
        if (kdata != stack_kdata)
                kfree(kdata);
        atomic_dec(&dev->ioctl_count);
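
The drm_ioctl hunk above records each ioctl's name in its descriptor via the '#ioctl' stringification in DRM_IOCTL_DEF and only prints the debug line once the descriptor has been looked up, so invalid ioctl numbers get a separate message. The stringification trick on its own, as a standalone C program (illustrative names, nothing DRM-specific):

    #include <stdio.h>

    struct cmd_desc {
            int             nr;
            const char      *name;
    };

    /* '#cmd' turns the macro argument into a string literal at compile time. */
    #define CMD_DEF(cmd) { .nr = (cmd), .name = #cmd }

    enum { CMD_RESET = 1, CMD_QUERY = 2 };

    static const struct cmd_desc cmds[] = {
            CMD_DEF(CMD_RESET),
            CMD_DEF(CMD_QUERY),
    };

    int main(void)
    {
            for (size_t i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
                    printf("%d -> %s\n", cmds[i].nr, cmds[i].name);
            return 0;
    }
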
index 48c52f7df4e63affac527e1f86e06188a03e2021..0cfb60f5476655edc097ca71c648d11544f3357e 100644 (file)
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
                         struct i2c_adapter *adap,
                         const struct i2c_board_info *info)
 {
-       char modalias[sizeof(I2C_MODULE_PREFIX)
-                     + I2C_NAME_SIZE];
        struct module *module = NULL;
        struct i2c_client *client;
        struct drm_i2c_encoder_driver *encoder_drv;
        int err = 0;
 
-       snprintf(modalias, sizeof(modalias),
-                "%s%s", I2C_MODULE_PREFIX, info->type);
-       request_module(modalias);
+       request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
        client = i2c_new_device(adap, info);
        if (!client) {
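
The hunk above deletes the local modalias buffer because request_module() takes a printf-style format and builds the string itself. A minimal sketch with a made-up device type argument (the real code passes I2C_MODULE_PREFIX and info->type):

    #include <linux/kmod.h>

    /* request_module() formats its arguments itself, so there is no need
     * to snprintf() into a temporary buffer first. */
    static void load_encoder_module(const char *type)
    {
            request_module("%s%s", "i2c:", type);
    }
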
index a6a8643a6a77b112518e363cb656800e44a6513d..8bcce7866d368e12df7c86edb17ff235ba58f4a7 100644 (file)
@@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off);
  */
 void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 {
-       /* vblank is not initialized (IRQ not installed ?) */
+       /* vblank is not initialized (IRQ not installed ?), or has been freed */
        if (!dev->num_crtcs)
                return;
        /*
@@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
 {
        unsigned long irqflags;
 
+       /* vblank is not initialized (IRQ not installed ?), or has been freed */
+       if (!dev->num_crtcs)
+               return;
+
        if (dev->vblank_inmodeset[crtc]) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                dev->vblank_disable_allowed = 1;
index db1e2d6f90d7221d713c04b70560d85a08ad6732..07cf99cc886283aedf9feee690d71ee6fbe2fb77 100644 (file)
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-       struct drm_mm_node *entry;
-       unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;
 
-       hole_start = drm_mm_hole_node_start(&mm->head_node);
-       hole_end = drm_mm_hole_node_end(&mm->head_node);
-       hole_size = hole_end - hole_start;
-       if (hole_size)
+       if (entry->hole_follows) {
+               hole_start = drm_mm_hole_node_start(entry);
+               hole_end = drm_mm_hole_node_end(entry);
+               hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
-       total_free += hole_size;
+               return hole_size;
+       }
+
+       return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       unsigned long total_used = 0, total_free = 0, total = 0;
+
+       total_free += drm_mm_dump_hole(m, &mm->head_node);
 
        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
-               if (entry->hole_follows) {
-                       hole_start = drm_mm_hole_node_start(entry);
-                       hole_end = drm_mm_hole_node_end(entry);
-                       hole_size = hole_end - hole_start;
-                       seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-                                       hole_start, hole_end, hole_size);
-                       total_free += hole_size;
-               }
+               total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;
 
index faa79df0264802e985719da65be37ee369a87a69..a371ff865a887755b81de57663a819429a34ed4c 100644 (file)
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
                                was_digit = false;
                        } else
                                goto done;
+                       break;
                case '0' ... '9':
                        was_digit = true;
                        break;
index e8894bc9e6d5edf0ecf07cea5078f3961191030d..c200e4d71e3d96a84bc8913b82cde320b0787ec9 100644 (file)
@@ -48,6 +48,8 @@ struct exynos_drm_crtc {
        unsigned int                    pipe;
        unsigned int                    dpms;
        enum exynos_crtc_mode           mode;
+       wait_queue_head_t               pending_flip_queue;
+       atomic_t                        pending_flip;
 };
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -61,6 +63,13 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
                return;
        }
 
+       if (mode > DRM_MODE_DPMS_ON) {
+               /* wait for the completion of page flip. */
+               wait_event(exynos_crtc->pending_flip_queue,
+                               atomic_read(&exynos_crtc->pending_flip) == 0);
+               drm_vblank_off(crtc->dev, exynos_crtc->pipe);
+       }
+
        exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
        exynos_crtc->dpms = mode;
 }
@@ -217,7 +226,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
                ret = drm_vblank_get(dev, exynos_crtc->pipe);
                if (ret) {
                        DRM_DEBUG("failed to acquire vblank counter\n");
-                       list_del(&event->base.link);
 
                        goto out;
                }
@@ -225,6 +233,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
                spin_lock_irq(&dev->event_lock);
                list_add_tail(&event->base.link,
                                &dev_priv->pageflip_event_list);
+               atomic_set(&exynos_crtc->pending_flip, 1);
                spin_unlock_irq(&dev->event_lock);
 
                crtc->fb = fb;
@@ -344,6 +353,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 
        exynos_crtc->pipe = nr;
        exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+       init_waitqueue_head(&exynos_crtc->pending_flip_queue);
+       atomic_set(&exynos_crtc->pending_flip, 0);
        exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
        if (!exynos_crtc->plane) {
                kfree(exynos_crtc);
@@ -398,7 +409,8 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
 {
        struct exynos_drm_private *dev_priv = dev->dev_private;
        struct drm_pending_vblank_event *e, *t;
-       struct timeval now;
+       struct drm_crtc *drm_crtc = dev_priv->crtc[crtc];
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
        unsigned long flags;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -411,14 +423,11 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               do_gettimeofday(&now);
-               e->event.sequence = 0;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
-
-               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
+               list_del(&e->base.link);
+               drm_send_vblank_event(dev, -1, e);
                drm_vblank_put(dev, crtc);
+               atomic_set(&exynos_crtc->pending_flip, 0);
+               wake_up(&exynos_crtc->pending_flip_queue);
        }
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
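
The Exynos CRTC hunks above add a pending_flip flag and waitqueue so that a DPMS-off request waits for an outstanding page flip, which the finish-pageflip path now completes through drm_send_vblank_event() before waking the waiter. The producer/consumer shape of that, stripped of DRM specifics (hypothetical flip_state, not driver code):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct flip_state {
            wait_queue_head_t queue;
            atomic_t pending;
    };

    static void flip_init(struct flip_state *s)
    {
            init_waitqueue_head(&s->queue);
            atomic_set(&s->pending, 0);
    }

    static void flip_submit(struct flip_state *s)
    {
            atomic_set(&s->pending, 1);     /* a flip is now outstanding */
    }

    static void flip_complete(struct flip_state *s)
    {
            atomic_set(&s->pending, 0);
            wake_up(&s->queue);             /* releases flip_drain() */
    }

    static void flip_drain(struct flip_state *s)
    {
            /* Sleep until the completion path has cleared 'pending'. */
            wait_event(s->queue, atomic_read(&s->pending) == 0);
    }
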
index 68f0045f86b860d7ecd4b7477a98719a950a7f95..8f007aaeffc3ea2b6e97f067e6be49b123df1eae 100644 (file)
@@ -182,7 +182,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
                        &exynos_gem_obj->base);
-       if (IS_ERR_OR_NULL(helper->fb)) {
+       if (IS_ERR(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
                ret = PTR_ERR(helper->fb);
                goto err_destroy_gem;
index 773f583fa9648c97ba4e5073a3a82681b50ef0af..4a1616a18ab7e9ff019ed4b6f50212789a78126a 100644 (file)
@@ -12,9 +12,9 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
@@ -1845,7 +1845,7 @@ static int fimc_probe(struct platform_device *pdev)
        }
 
        ctx->irq = res->start;
-       ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+       ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler,
                IRQF_ONESHOT, "drm_fimc", ctx);
        if (ret < 0) {
                dev_err(dev, "failed to request irq.\n");
@@ -1854,7 +1854,7 @@ static int fimc_probe(struct platform_device *pdev)
 
        ret = fimc_setup_clocks(ctx);
        if (ret < 0)
-               goto err_free_irq;
+               return ret;
 
        ippdrv = &ctx->ippdrv;
        ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
@@ -1884,7 +1884,7 @@ static int fimc_probe(struct platform_device *pdev)
                goto err_pm_dis;
        }
 
-       dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+       dev_info(dev, "drm fimc registered successfully.\n");
 
        return 0;
 
@@ -1892,8 +1892,6 @@ err_pm_dis:
        pm_runtime_disable(dev);
 err_put_clk:
        fimc_put_clocks(ctx);
-err_free_irq:
-       free_irq(ctx->irq, ctx);
 
        return ret;
 }
@@ -1911,8 +1909,6 @@ static int fimc_remove(struct platform_device *pdev)
        pm_runtime_set_suspended(dev);
        pm_runtime_disable(dev);
 
-       free_irq(ctx->irq, ctx);
-
        return 0;
 }
 
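
The FIMC hunk above (like the GSC, rotator and HDMI hunks that follow) moves to devm_request_threaded_irq(), after which the error paths and the remove callback no longer need free_irq(): the managed IRQ is released automatically when the device is unbound. Sketch of the managed form with a hypothetical handler and device name:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t my_irq_thread(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int my_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);
            int ret;

            if (irq < 0)
                    return irq;

            /* Managed request: no free_irq() in error paths or remove(). */
            ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                            my_irq_thread, IRQF_ONESHOT,
                                            "my-device", pdev);
            return ret;
    }
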
index 746b282b343abb0328b9c4b6aaa9b2b0e0a89e03..97c61dbffd82ee36cfe2a92dfaf5b0b618ee07cd 100644 (file)
@@ -885,7 +885,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (pdev->dev.of_node) {
+       if (dev->of_node) {
                pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
                if (!pdata) {
                        DRM_ERROR("memory allocation for pdata failed\n");
@@ -899,7 +899,7 @@ static int fimd_probe(struct platform_device *pdev)
                        return ret;
                }
        } else {
-               pdata = pdev->dev.platform_data;
+               pdata = dev->platform_data;
                if (!pdata) {
                        DRM_ERROR("no platform data specified\n");
                        return -EINVAL;
@@ -912,7 +912,7 @@ static int fimd_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -930,7 +930,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-       ctx->regs = devm_ioremap_resource(&pdev->dev, res);
+       ctx->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
@@ -942,7 +942,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        ctx->irq = res->start;
 
-       ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler,
+       ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler,
                                                        0, "drm_fimd", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
index 47a493c8a71f2629b7acf0de109bcd3eefc15bfd..af75434ee4d7993aacb9d6c3884f2177937f00f6 100644 (file)
@@ -1379,7 +1379,7 @@ static int g2d_probe(struct platform_device *pdev)
        struct exynos_drm_subdrv *subdrv;
        int ret;
 
-       g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
+       g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
        if (!g2d) {
                dev_err(dev, "failed to allocate driver data\n");
                return -ENOMEM;
@@ -1417,7 +1417,7 @@ static int g2d_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-       g2d->regs = devm_ioremap_resource(&pdev->dev, res);
+       g2d->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(g2d->regs)) {
                ret = PTR_ERR(g2d->regs);
                goto err_put_clk;
@@ -1430,7 +1430,7 @@ static int g2d_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
-       ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+       ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
                                                                "drm_g2d", g2d);
        if (ret < 0) {
                dev_err(dev, "irq request failed\n");
index 7841c3b8a20e022e94a0983c2ba09b232ab66fc4..762f40d548b76398904480f839b1741721aca2b9 100644 (file)
@@ -1704,7 +1704,7 @@ static int gsc_probe(struct platform_device *pdev)
        }
 
        ctx->irq = res->start;
-       ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+       ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
                IRQF_ONESHOT, "drm_gsc", ctx);
        if (ret < 0) {
                dev_err(dev, "failed to request irq.\n");
@@ -1725,7 +1725,7 @@ static int gsc_probe(struct platform_device *pdev)
        ret = gsc_init_prop_list(ippdrv);
        if (ret < 0) {
                dev_err(dev, "failed to init property list.\n");
-               goto err_get_irq;
+               return ret;
        }
 
        DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
@@ -1743,15 +1743,12 @@ static int gsc_probe(struct platform_device *pdev)
                goto err_ippdrv_register;
        }
 
-       dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+       dev_info(dev, "drm gsc registered successfully.\n");
 
        return 0;
 
 err_ippdrv_register:
-       devm_kfree(dev, ippdrv->prop_list);
        pm_runtime_disable(dev);
-err_get_irq:
-       free_irq(ctx->irq, ctx);
        return ret;
 }
 
@@ -1761,15 +1758,12 @@ static int gsc_remove(struct platform_device *pdev)
        struct gsc_context *ctx = get_gsc_context(dev);
        struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 
-       devm_kfree(dev, ippdrv->prop_list);
        exynos_drm_ippdrv_unregister(ippdrv);
        mutex_destroy(&ctx->lock);
 
        pm_runtime_set_suspended(dev);
        pm_runtime_disable(dev);
 
-       free_irq(ctx->irq, ctx);
-
        return 0;
 }
 
index ba2f0f1aa05f41f895257c697b4926d726159e9f..437fb947e46dbce4a0b510710649eba8e5a3c6a8 100644 (file)
@@ -442,7 +442,7 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                DRM_LOG_KMS("failed to alloc common hdmi context.\n");
                return -ENOMEM;
index 29d2ad314490126302ed65e5cb9f79b5afd896f1..be1e884634664533ac9971ef753434a935fac1ee 100644 (file)
@@ -222,7 +222,7 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
                /* find ipp driver using idr */
                ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                        ipp_id);
-               if (IS_ERR_OR_NULL(ippdrv)) {
+               if (IS_ERR(ippdrv)) {
                        DRM_ERROR("not found ipp%d driver.\n", ipp_id);
                        return ippdrv;
                }
@@ -388,7 +388,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
 
        ippdrv = ipp_find_drv_by_handle(prop_id);
-       if (IS_ERR_OR_NULL(ippdrv)) {
+       if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EINVAL;
        }
@@ -492,7 +492,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
 
        /* find ipp driver using ipp id */
        ippdrv = ipp_find_driver(ctx, property);
-       if (IS_ERR_OR_NULL(ippdrv)) {
+       if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EINVAL;
        }
@@ -521,19 +521,19 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
        c_node->state = IPP_STATE_IDLE;
 
        c_node->start_work = ipp_create_cmd_work();
-       if (IS_ERR_OR_NULL(c_node->start_work)) {
+       if (IS_ERR(c_node->start_work)) {
                DRM_ERROR("failed to create start work.\n");
                goto err_clear;
        }
 
        c_node->stop_work = ipp_create_cmd_work();
-       if (IS_ERR_OR_NULL(c_node->stop_work)) {
+       if (IS_ERR(c_node->stop_work)) {
                DRM_ERROR("failed to create stop work.\n");
                goto err_free_start;
        }
 
        c_node->event_work = ipp_create_event_work();
-       if (IS_ERR_OR_NULL(c_node->event_work)) {
+       if (IS_ERR(c_node->event_work)) {
                DRM_ERROR("failed to create event work.\n");
                goto err_free_stop;
        }
@@ -915,7 +915,7 @@ static int ipp_queue_buf_with_run(struct device *dev,
        DRM_DEBUG_KMS("%s\n", __func__);
 
        ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
-       if (IS_ERR_OR_NULL(ippdrv)) {
+       if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EFAULT;
        }
@@ -1909,7 +1909,7 @@ static int ipp_probe(struct platform_device *pdev)
        struct exynos_drm_subdrv *subdrv;
        int ret;
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -1963,7 +1963,7 @@ static int ipp_probe(struct platform_device *pdev)
                goto err_cmd_workq;
        }
 
-       dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+       dev_info(dev, "drm ipp registered successfully.\n");
 
        return 0;
 
index 947f09f15ad1abe53179b77d330db5602a936dc5..9b6c70964d71c969106ed41f5f849979e2094601 100644 (file)
@@ -666,8 +666,8 @@ static int rotator_probe(struct platform_device *pdev)
                return rot->irq;
        }
 
-       ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
-                       IRQF_ONESHOT, "drm_rotator", rot);
+       ret = devm_request_threaded_irq(dev, rot->irq, NULL,
+                       rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
        if (ret < 0) {
                dev_err(dev, "failed to request irq\n");
                return ret;
@@ -676,8 +676,7 @@ static int rotator_probe(struct platform_device *pdev)
        rot->clock = devm_clk_get(dev, "rotator");
        if (IS_ERR(rot->clock)) {
                dev_err(dev, "failed to get clock\n");
-               ret = PTR_ERR(rot->clock);
-               goto err_clk_get;
+               return PTR_ERR(rot->clock);
        }
 
        pm_runtime_enable(dev);
@@ -709,10 +708,7 @@ static int rotator_probe(struct platform_device *pdev)
        return 0;
 
 err_ippdrv_register:
-       devm_kfree(dev, ippdrv->prop_list);
        pm_runtime_disable(dev);
-err_clk_get:
-       free_irq(rot->irq, rot);
        return ret;
 }
 
@@ -722,13 +718,10 @@ static int rotator_remove(struct platform_device *pdev)
        struct rot_context *rot = dev_get_drvdata(dev);
        struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
 
-       devm_kfree(dev, ippdrv->prop_list);
        exynos_drm_ippdrv_unregister(ippdrv);
 
        pm_runtime_disable(dev);
 
-       free_irq(rot->irq, rot);
-
        return 0;
 }
 
index 9504b0cd825a4dc9d4b81c2be674eb0897eb137d..24376c194a5ec025a1b5f300656c23bea45f50d3 100644 (file)
@@ -594,7 +594,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -612,7 +612,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ctx);
 
-       ret = device_create_file(&pdev->dev, &dev_attr_connection);
+       ret = device_create_file(dev, &dev_attr_connection);
        if (ret < 0)
                DRM_INFO("failed to create connection sysfs.\n");
 
index bbfc3840080cf03b718e76fb0aa9f3119ec4ba24..fd1426dca8824d48db62aa1bb27d62e7b4070d9f 100644 (file)
@@ -1946,14 +1946,14 @@ static int hdmi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("[%d]\n", __LINE__);
 
-       if (pdev->dev.of_node) {
+       if (dev->of_node) {
                pdata = drm_hdmi_dt_parse_pdata(dev);
                if (IS_ERR(pdata)) {
                        DRM_ERROR("failed to parse dt\n");
                        return PTR_ERR(pdata);
                }
        } else {
-               pdata = pdev->dev.platform_data;
+               pdata = dev->platform_data;
        }
 
        if (!pdata) {
@@ -1961,14 +1961,14 @@ static int hdmi_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+       drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
                                                                GFP_KERNEL);
        if (!drm_hdmi_ctx) {
                DRM_ERROR("failed to allocate common hdmi context.\n");
                return -ENOMEM;
        }
 
-       hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context),
+       hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
                                                                GFP_KERNEL);
        if (!hdata) {
                DRM_ERROR("out of memory\n");
@@ -1985,7 +1985,7 @@ static int hdmi_probe(struct platform_device *pdev)
        if (dev->of_node) {
                const struct of_device_id *match;
                match = of_match_node(of_match_ptr(hdmi_match_types),
-                                       pdev->dev.of_node);
+                                       dev->of_node);
                if (match == NULL)
                        return -ENODEV;
                hdata->type = (enum hdmi_type)match->data;
@@ -2005,16 +2005,11 @@ static int hdmi_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               DRM_ERROR("failed to find registers\n");
-               return -ENOENT;
-       }
-
-       hdata->regs = devm_ioremap_resource(&pdev->dev, res);
+       hdata->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(hdata->regs))
                return PTR_ERR(hdata->regs);
 
-       ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
+       ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
        if (ret) {
                DRM_ERROR("failed to request HPD gpio\n");
                return ret;
@@ -2046,7 +2041,7 @@ static int hdmi_probe(struct platform_device *pdev)
 
        hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 
-       ret = request_threaded_irq(hdata->irq, NULL,
+       ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
                        hdmi_irq_thread, IRQF_TRIGGER_RISING |
                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                        "hdmi", drm_hdmi_ctx);
@@ -2075,16 +2070,11 @@ err_ddc:
 static int hdmi_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
-       struct hdmi_context *hdata = ctx->ctx;
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
        pm_runtime_disable(dev);
 
-       free_irq(hdata->irq, hdata);
-
-
        /* hdmiphy i2c driver */
        i2c_del_driver(&hdmiphy_driver);
        /* DDC i2c driver */
index ec3e376b7e01e08f82c68d26b6a27e7e5ff0b55b..7c197d3820c5583c2023831f9df5b1c7d5891f73 100644 (file)
@@ -1061,7 +1061,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
                return -ENXIO;
        }
 
-       mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
+       mixer_res->mixer_regs = devm_ioremap(dev, res->start,
                                                        resource_size(res));
        if (mixer_res->mixer_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
@@ -1074,7 +1074,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
                return -ENXIO;
        }
 
-       ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
+       ret = devm_request_irq(dev, res->start, mixer_irq_handler,
                                                        0, "drm_mixer", ctx);
        if (ret) {
                dev_err(dev, "request interrupt failed.\n");
@@ -1118,7 +1118,7 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
                return -ENXIO;
        }
 
-       mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
+       mixer_res->vp_regs = devm_ioremap(dev, res->start,
                                                        resource_size(res));
        if (mixer_res->vp_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
@@ -1169,14 +1169,14 @@ static int mixer_probe(struct platform_device *pdev)
 
        dev_info(dev, "probe start\n");
 
-       drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+       drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
                                                                GFP_KERNEL);
        if (!drm_hdmi_ctx) {
                DRM_ERROR("failed to allocate common hdmi context.\n");
                return -ENOMEM;
        }
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                DRM_ERROR("failed to alloc mixer context.\n");
                return -ENOMEM;
@@ -1187,14 +1187,14 @@ static int mixer_probe(struct platform_device *pdev)
        if (dev->of_node) {
                const struct of_device_id *match;
                match = of_match_node(of_match_ptr(mixer_match_types),
-                                                         pdev->dev.of_node);
+                                                         dev->of_node);
                drv = (struct mixer_drv_data *)match->data;
        } else {
                drv = (struct mixer_drv_data *)
                        platform_get_device_id(pdev)->driver_data;
        }
 
-       ctx->dev = &pdev->dev;
+       ctx->dev = dev;
        ctx->parent_ctx = (void *)drm_hdmi_ctx;
        drm_hdmi_ctx->ctx = (void *)ctx;
        ctx->vp_enabled = drv->is_vp_enabled;
index 3cfd0931fbfb1ce41013475ef70c62343b8b4cd8..82430ad8ba623934fd74a3f12059c5d180c92fae 100644 (file)
@@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
        size_t addr = 0;
        struct gtt_range *gt;
        struct drm_gem_object *obj;
-       int ret;
+       int ret = 0;
 
        /* if we want to turn of the cursor ignore width and height */
        if (!handle) {
@@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
 
        if (obj->size < width * height * 4) {
                 dev_dbg(dev->dev, "buffer is too small\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto unref_cursor;
        }
 
        gt = container_of(obj, struct gtt_range, gem);
@@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
        ret = psb_gtt_pin(gt);
        if (ret) {
                dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
-               return ret;
+               goto unref_cursor;
        }
 
        addr = gt->offset;      /* Or resource.start ??? */
@@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
                                                        struct gtt_range, gem);
                psb_gtt_unpin(gt);
                drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
-               psb_intel_crtc->cursor_obj = obj;
        }
-       return 0;
+
+       psb_intel_crtc->cursor_obj = obj;
+       return ret;
+
+unref_cursor:
+       drm_gem_object_unreference(obj);
+       return ret;
 }
 
 static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
        kfree(psb_intel_crtc);
 }
 
+static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
+{
+       struct gtt_range *gt;
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+       if (crtc->fb) {
+               gt = to_psb_fb(crtc->fb)->gtt;
+               psb_gtt_unpin(gt);
+       }
+}
+
 const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
        .dpms = cdv_intel_crtc_dpms,
        .mode_fixup = cdv_intel_crtc_mode_fixup,
@@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
        .mode_set_base = cdv_intel_pipe_set_base,
        .prepare = cdv_intel_crtc_prepare,
        .commit = cdv_intel_crtc_commit,
+       .disable = cdv_intel_crtc_disable,
 };
 
 const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
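
The GMA500 cursor hunks (cdv above, psb just below) funnel every failure after the GEM lookup through one unref_cursor label so the reference taken by the lookup is always dropped. The general shape of that single-exit error handling, with placeholder get/put helpers (not DRM calls):

    #include <linux/errno.h>

    struct object;
    extern struct object *get_object(void);        /* takes a reference */
    extern int do_step(struct object *obj);
    extern int do_other_step(struct object *obj);
    extern void put_object(struct object *obj);    /* drops the reference */

    static int do_with_object(void)
    {
            struct object *obj;
            int ret;

            obj = get_object();
            if (!obj)
                    return -ENOENT;

            ret = do_step(obj);
            if (ret)
                    goto unref;             /* every failure drops the ref */

            ret = do_other_step(obj);
            if (ret)
                    goto unref;

            return 0;

    unref:
            put_object(obj);
            return ret;
    }
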
index 1534e220097ab8bbe5b8e272df0d8f007b6447a2..8b1b6d923abe82ab8119ce7134d3a45b681a54f6 100644 (file)
@@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        unsigned long address;
        int ret;
        unsigned long pfn;
-       /* FIXME: assumes fb at stolen base which may not be true */
-       unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+       unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
+                                 psbfb->gtt->offset;
 
        page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
index 6e8f42b61ff64ef7609de8d71e1b0696bf7887a4..6666493789d1338b5db875265f99339850ceb3a2 100644 (file)
@@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
        struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
        struct drm_gem_object *obj;
        void *tmp_dst, *tmp_src;
-       int ret, i, cursor_pages;
+       int ret = 0, i, cursor_pages;
 
        /* if we want to turn of the cursor ignore width and height */
        if (!handle) {
@@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
 
        if (obj->size < width * height * 4) {
                 dev_dbg(dev->dev, "buffer is too small\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto unref_cursor;
        }
 
        gt = container_of(obj, struct gtt_range, gem);
@@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
        ret = psb_gtt_pin(gt);
        if (ret) {
                dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
-               return ret;
+               goto unref_cursor;
        }
 
        if (dev_priv->ops->cursor_needs_phys) {
                if (cursor_gt == NULL) {
                        dev_err(dev->dev, "No hardware cursor mem available");
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto unref_cursor;
                }
 
                /* Prevent overflow */
@@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
                                                        struct gtt_range, gem);
                psb_gtt_unpin(gt);
                drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
-               psb_intel_crtc->cursor_obj = obj;
        }
-       return 0;
+
+       psb_intel_crtc->cursor_obj = obj;
+       return ret;
+
+unref_cursor:
+       drm_gem_object_unreference(obj);
+       return ret;
 }
 
 static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
        kfree(psb_intel_crtc);
 }
 
+static void psb_intel_crtc_disable(struct drm_crtc *crtc)
+{
+       struct gtt_range *gt;
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+       if (crtc->fb) {
+               gt = to_psb_fb(crtc->fb)->gtt;
+               psb_gtt_unpin(gt);
+       }
+}
+
 const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
        .dpms = psb_intel_crtc_dpms,
        .mode_fixup = psb_intel_crtc_mode_fixup,
@@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
        .mode_set_base = psb_intel_pipe_set_base,
        .prepare = psb_intel_crtc_prepare,
        .commit = psb_intel_crtc_commit,
+       .disable = psb_intel_crtc_disable,
 };
 
 const struct drm_crtc_funcs psb_intel_crtc_funcs = {
index 9ebe895c17d6b34dae006a6d392d7911b45eae8b..a2e4953b8e8d5771d6edd95bcaf7f3198674f6b4 100644 (file)
@@ -364,40 +364,64 @@ static const struct pci_device_id pciidlist[] = {         /* aka */
        INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
        INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
        INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
-       INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
+       INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
        INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
        INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
-       INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
+       INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
        INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
        INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+       INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
+       INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
+       INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
+       INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
+       INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
+       INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
        INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
        INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
-       INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+       INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
        INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
        INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
-       INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+       INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
        INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
        INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
-       INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+       INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
+       INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
+       INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
+       INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
+       INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
+       INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
+       INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
        INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
        INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
-       INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+       INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
        INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
        INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
-       INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+       INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
        INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
        INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
-       INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+       INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
+       INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
+       INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
+       INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
+       INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
+       INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
+       INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
        INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
        INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
-       INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+       INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
        INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
        INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
-       INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+       INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
        INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
        INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
-       INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+       INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
+       INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
+       INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
+       INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
+       INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
+       INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
+       INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
        INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
index d5dcf7fe1ee9b536bee4beb82e4684f0f86b24e2..b9d00dcf9a2d4fcb618c67af82df56ee640f72e2 100644 (file)
@@ -1943,4 +1943,19 @@ static inline void __user *to_user_ptr(u64 address)
        return (void __user *)(uintptr_t)address;
 }
 
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+       unsigned long j = msecs_to_jiffies(m);
+
+       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long
+timespec_to_jiffies_timeout(const struct timespec *value)
+{
+       unsigned long j = timespec_to_jiffies(value);
+
+       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
 #endif
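
The i915 hunk above adds *_to_jiffies_timeout() helpers that clamp to MAX_JIFFY_OFFSET and add one jiffy: the current tick may be almost over when the wait starts, so without the extra jiffy a timeout can expire up to one tick early. A standalone illustration of the same rounding, with an assumed 100 Hz tick and an arbitrary cap standing in for MAX_JIFFY_OFFSET:

    #include <stdio.h>

    #define TICK_HZ         100                     /* assumed tick rate */
    #define MAX_OFFSET      ((~0UL >> 1) - 1)       /* example cap */

    static unsigned long msecs_to_ticks_timeout(unsigned int ms)
    {
            unsigned long t = ((unsigned long)ms * TICK_HZ + 999) / 1000;

            /* +1 tick: the current tick may already be nearly finished, so
             * without it the sleep could end up to one tick (10 ms) short. */
            return (t + 1 > MAX_OFFSET) ? MAX_OFFSET : t + 1;
    }

    int main(void)
    {
            /* 30 ms -> 3 ticks rounded up, plus 1 guard tick -> 4 */
            printf("30 ms -> %lu ticks\n", msecs_to_ticks_timeout(30));
            return 0;
    }
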
index 6be940effefd1cf3dd69262e826c9cc999e4c26c..970ad17c99ab1092522999a4382737b5dbffbafe 100644 (file)
@@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
        int ret;
 
-#define EXIT_COND (!i915_reset_in_progress(error))
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+                  i915_terminally_wedged(error))
        if (EXIT_COND)
                return 0;
 
-       /* GPU is already declared terminally dead, give up. */
-       if (i915_terminally_wedged(error))
-               return -EIO;
-
        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
@@ -1003,7 +1000,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                wait_forever = false;
        }
 
-       timeout_jiffies = timespec_to_jiffies(&wait_time);
+       timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
 
        if (WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
@@ -1045,6 +1042,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
                *timeout = timespec_sub(*timeout, sleep_time);
+               if (!timespec_valid(timeout)) /* i.e. negative time remains */
+                       set_normalized_timespec(timeout, 0, 0);
        }
 
        switch (end) {
@@ -1053,8 +1052,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        case -ERESTARTSYS: /* Signal */
                return (int)end;
        case 0: /* Timeout */
-               if (timeout)
-                       set_normalized_timespec(timeout, 0, 0);
                return -ETIME;
        default: /* Completed */
                WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2374,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        mutex_unlock(&dev->struct_mutex);
 
        ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
-       if (timeout) {
-               WARN_ON(!timespec_valid(timeout));
+       if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
-       }
        return ret;
 
 out:
index dca614de71b6a34189bb40fa0a99cc0f60fcb6ec..bdb0d7717bc77937dce3c4de563f1e7066c7bb2e 100644 (file)
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
        return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
-       static const int stolen_decoder[] = {
-               0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
-       snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
-       snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
-       return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
 static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-       if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
-               *stolen = gen7_get_stolen_size(snb_gmch_ctl);
-       else
-               *stolen = gen6_get_stolen_size(snb_gmch_ctl);
-
+       *stolen = gen6_get_stolen_size(snb_gmch_ctl);
        *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
index 83f9c26e1adbf7b9451e02303545eb1b076b03fa..2d6b62e42daf324478ea64bc49e6e722330c3e2b 100644 (file)
@@ -46,8 +46,6 @@
 #define    SNB_GMCH_GGMS_MASK  0x3
 #define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
 #define    SNB_GMCH_GMS_MASK    0x1f
-#define    IVB_GMCH_GMS_SHIFT   4
-#define    IVB_GMCH_GMS_MASK    0xf
 
 
 /* PCI config space */
index 26a0a570f92e0eba28fa443b28176252a7bb577d..fb961bb81903c95550289845a846bd0308c448d7 100644 (file)
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               if (port != PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
        }
 }
 
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        } else if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               if (port == PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
+
                ironlake_edp_backlight_on(intel_dp);
        }
 
index efe8299197555c1ae5def2bc95bf930a4e041a8b..56746dcac40f116fe58c25e648ee9887a0f63a39 100644 (file)
@@ -7937,6 +7937,11 @@ intel_modeset_check_state(struct drm_device *dev)
                memset(&pipe_config, 0, sizeof(pipe_config));
                active = dev_priv->display.get_pipe_config(crtc,
                                                           &pipe_config);
+
+               /* hw state is inconsistent with the pipe A quirk */
+               if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+                       active = crtc->active;
+
                WARN(crtc->active != active,
                     "crtc active state doesn't match with hw state "
                     "(expected %i, found %i)\n", crtc->active, active);
@@ -8140,6 +8145,21 @@ static void intel_set_config_restore_state(struct drm_device *dev,
        }
 }
 
+static bool
+is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
+                     int num_connectors)
+{
+       int i;
+
+       for (i = 0; i < num_connectors; i++)
+               if (connectors[i].encoder &&
+                   connectors[i].encoder->crtc == crtc &&
+                   connectors[i].dpms != DRM_MODE_DPMS_ON)
+                       return true;
+
+       return false;
+}
+
 static void
 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                                      struct intel_set_config *config)
@@ -8147,7 +8167,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 
        /* We should be able to check here if the fb has the same properties
         * and then just flip_or_move it */
-       if (set->crtc->fb != set->fb) {
+       if (set->connectors != NULL &&
+           is_crtc_connector_off(set->crtc, *set->connectors,
+                                 set->num_connectors)) {
+                       config->mode_changed = true;
+       } else if (set->crtc->fb != set->fb) {
                /* If we have no fb then treat it as a full mode set */
                if (set->crtc->fb == NULL) {
                        DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
@@ -8157,8 +8181,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                } else if (set->fb->pixel_format !=
                           set->crtc->fb->pixel_format) {
                        config->mode_changed = true;
-               } else
+               } else {
                        config->fb_changed = true;
+               }
        }
 
        if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
@@ -8332,11 +8357,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
                ret = intel_set_mode(set->crtc, set->mode,
                                     set->x, set->y, set->fb);
-               if (ret) {
-                       DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
-                                 set->crtc->base.id, ret);
-                       goto fail;
-               }
        } else if (config->fb_changed) {
                intel_crtc_wait_for_pending_flips(set->crtc);
 
@@ -8344,18 +8364,18 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
                                          set->x, set->y, set->fb);
        }
 
-       intel_set_config_free(config);
-
-       return 0;
-
+       if (ret) {
+               DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+                         set->crtc->base.id, ret);
 fail:
-       intel_set_config_restore_state(dev, config);
+               intel_set_config_restore_state(dev, config);
 
-       /* Try to restore the config */
-       if (config->mode_changed &&
-           intel_set_mode(save_set.crtc, save_set.mode,
-                          save_set.x, save_set.y, save_set.fb))
-               DRM_ERROR("failed to restore config after modeset failure\n");
+               /* Try to restore the config */
+               if (config->mode_changed &&
+                   intel_set_mode(save_set.crtc, save_set.mode,
+                                  save_set.x, save_set.y, save_set.fb))
+                       DRM_ERROR("failed to restore config after modeset failure\n");
+       }
 
 out_config:
        intel_set_config_free(config);
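
The restructured tail of intel_crtc_set_config() folds the error handling into one exit path: a failed modeset now falls through the fail: label into the restore logic, and both outcomes reach out_config:, where the config state is freed exactly once. A small illustrative sketch of that single-cleanup-path shape, with stand-in names rather than the driver's functions:

#include <stdio.h>
#include <stdlib.h>

/* do_modeset() stands in for intel_set_mode(); a non-zero argument forces a failure. */
static int do_modeset(int force_failure)
{
        return force_failure ? -22 : 0;   /* -EINVAL as an example error */
}

static int set_config(int force_failure)
{
        int ret;
        char *config = malloc(64);        /* stands in for the intel_set_config state */

        if (!config)
                return -12;               /* -ENOMEM */

        ret = do_modeset(force_failure);
        if (ret) {
                fprintf(stderr, "failed to set mode, err = %d\n", ret);
                /* fail: the restore-previous-state work would run here */
        }

        /* out_config: the single cleanup path, reached on success and failure alike */
        free(config);
        return ret;
}

int main(void)
{
        set_config(0);
        set_config(1);
        return 0;
}
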
index fb2fbc1e08b9ba0048b17c02badb8fd8ed29d089..70789b1b564282b9ba7df39c73283e8b532d67ad 100644 (file)
@@ -303,7 +303,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
-                                         msecs_to_jiffies(10));
+                                         msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+       if (is_edp(intel_dp) && dev_priv->edp.bpp)
+               bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(target_clock, bpp);
 
@@ -739,6 +742,7 @@ found:
        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+       pipe_config->pipe_bpp = bpp;
        pipe_config->pixel_target_clock = target_clock;
 
        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ found:
                               target_clock, adjusted_mode->clock,
                               &pipe_config->dp_m_n);
 
-       /*
-        * XXX: We have a strange regression where using the vbt edp bpp value
-        * for the link bw computation results in black screens, the panel only
-        * works when we do the computation at the usual 24bpp (but still
-        * requires us to use 18bpp). Until that's fully debugged, stay
-        * bug-for-bug compatible with the old code.
-        */
-       if (is_edp(intel_dp) && dev_priv->edp.bpp) {
-               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
-                             bpp, dev_priv->edp.bpp);
-               bpp = min_t(int, bpp, dev_priv->edp.bpp);
-       }
-       pipe_config->pipe_bpp = bpp;
-
        return true;
 }
 
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        ironlake_edp_panel_on(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
+       intel_dp_stop_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
 }
 
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int ret;
-       uint32_t temp;
 
        if (HAS_DDI(dev)) {
-               temp = I915_READ(DP_TP_CTL(port));
+               uint32_t temp = I915_READ(DP_TP_CTL(port));
 
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
-
-                       if (port != PORT_A) {
-                               temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
-                               I915_WRITE(DP_TP_CTL(port), temp);
-
-                               if (wait_for((I915_READ(DP_TP_STATUS(port)) &
-                                             DP_TP_STATUS_IDLE_DONE), 1))
-                                       DRM_ERROR("Timed out waiting for DP idle patterns\n");
-
-                               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-                       }
-
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
                        break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        return true;
 }
 
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
+       uint32_t val;
+
+       if (!HAS_DDI(dev))
+               return;
+
+       val = I915_READ(DP_TP_CTL(port));
+       val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+       val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+       I915_WRITE(DP_TP_CTL(port), val);
+
+       /*
+        * On PORT_A we can have only eDP in SST mode. There the only reason
+        * we need to set idle transmission mode is to work around a HW issue
+        * where we enable the pipe while not in idle link-training mode.
+        * In this case there is a requirement to wait for a minimum number of
+        * idle patterns to be sent.
+        */
+       if (port == PORT_A)
+               return;
+
+       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+                    1))
+               DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
 /* Enable corresponding port and start training pattern 1 */
 void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
+       intel_dp_set_idle_link_train(intel_dp);
+
+       intel_dp->DP = DP;
+
        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
 
-       intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+       intel_dp_set_link_train(intel_dp, intel_dp->DP,
+                               DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                              drm_get_encoder_name(&intel_encoder->base));
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               intel_dp_stop_link_train(intel_dp);
        }
 }
 
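
Taken together, the intel_dp changes split link training into three steps: intel_dp_start_link_train(), intel_dp_complete_link_train() (which now leaves the port sending idle patterns) and the new intel_dp_stop_link_train(). The callers decide when to stop: every port except DDI PORT_A stops right after training, while eDP on PORT_A stops only once the pipe is enabled. A compact sketch of that calling order; the function bodies are illustrative prints, not the driver code.

#include <stdio.h>

enum port { PORT_A, PORT_B };

/* Illustrative stand-ins for the intel_dp_*_link_train() steps. */
static void start_link_train(void)    { puts("send training pattern 1/2"); }
static void complete_link_train(void) { puts("channel EQ done, port left in idle pattern"); }
static void stop_link_train(void)     { puts("switch to normal pixel transmission"); }
static void enable_pipe(void)         { puts("pipe enabled"); }

static void bring_up_link(enum port port)
{
        start_link_train();
        complete_link_train();
        if (port != PORT_A)
                stop_link_train();      /* non-eDP ports stop right after training */

        enable_pipe();

        if (port == PORT_A)
                stop_link_train();      /* eDP on PORT_A stops only after the pipe is up */
}

int main(void)
{
        bring_up_link(PORT_A);
        bring_up_link(PORT_B);
        return 0;
}
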
index b5b6d19e6dd3ff30293799980d1efafef6540812..624a9e6b8d718ebe64a3aa41019137b673b3b296 100644 (file)
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
index 0e19e575a1b41e456cbe3e82877502fa1906c875..6b7c3ca2c035e5514c2c333877966ff1cb3d3cfd 100644 (file)
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
 void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       if (!dev_priv->fbdev)
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct fb_info *info;
+
+       if (!ifbdev)
                return;
 
-       fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+       info = ifbdev->helper.fbdev;
+
+       /* On resume from hibernation: If the object is shmemfs backed, it has
+        * been restored from swap. If the object is stolen however, it will be
+        * full of whatever garbage was left in there.
+        */
+       if (!state && ifbdev->ifb.obj->stolen)
+               memset_io(info->screen_base, 0, info->screen_size);
+
+       fb_set_suspend(info, state);
 }
 
 MODULE_LICENSE("GPL and additional rights");
index 5d245031e391a464fbe8eb6fb6031e70e1ac6770..639fe192997cdecb602c789f97045aad61328249 100644 (file)
@@ -228,7 +228,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
         * need to wake up periodically and check that ourselves. */
        I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
 
-       for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+       for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
                prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
 
@@ -263,7 +263,8 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
        /* Important: The hw handles only the first bit, so set only one! */
        I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
 
-       ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+       ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+                                msecs_to_jiffies_timeout(10));
 
        I915_WRITE(GMBUS4 + reg_offset, 0);
 
index f36f1baabd5ad6ced3522148ddb49d9f6cee074b..29412cc89c7aa71999c8da1f52c1ae543d766e53 100644 (file)
@@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
        },
        {
                .callback = intel_no_lvds_dmi_callback,
-               .ident = "Hewlett-Packard HP t5740e Thin Client",
+               .ident = "Hewlett-Packard HP t5740",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
                },
        },
        {
index de3b0dc5658bcf0c84081ec23ee1cc0add9d93c5..aa01128ff192cc6c5860c881a9aa8a71cb756fd7 100644 (file)
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
 
        vlv_update_drain_latency(dev);
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
-       if (g4x_compute_wm0(dev, 2,
+       if (g4x_compute_wm0(dev, PIPE_C,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 3;
+               enabled |= 1 << PIPE_C;
        }
 
        /*
index d15428404b9a22cd5cf20566777dd4785d5f9f90..d4ea6c265ce113ca4a179f41ff2ba061ceff62e0 100644 (file)
@@ -1776,11 +1776,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
         * Assume that the preferred modes are
         * arranged in priority order.
         */
-       intel_ddc_get_modes(connector, intel_sdvo->i2c);
-       if (list_empty(&connector->probed_modes) == false)
-               goto end;
+       intel_ddc_get_modes(connector, &intel_sdvo->ddc);
 
-       /* Fetch modes from VBT */
+       /*
+        * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+        * SDVO->LVDS transcoders can't cope with the EDID mode. Since
+        * drm_mode_probed_add adds the mode at the head of the list we add it
+        * last.
+        */
        if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
                newmode = drm_mode_duplicate(connector->dev,
                                             dev_priv->sdvo_lvds_vbt_mode);
@@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
                }
        }
 
-end:
        list_for_each_entry(newmode, &connector->probed_modes, head) {
                if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
                        intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                        SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
        }
 
-       /* Only enable the hotplug irq if we need it, to work around noisy
-        * hotplug lines.
-        */
-       if (intel_sdvo->hotplug_active)
-               intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
-
        intel_encoder->compute_config = intel_sdvo_compute_config;
        intel_encoder->disable = intel_disable_sdvo;
        intel_encoder->mode_set = intel_sdvo_mode_set;
@@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                goto err_output;
        }
 
+       /* Only enable the hotplug irq if we need it, to work around noisy
+        * hotplug lines.
+        */
+       if (intel_sdvo->hotplug_active) {
+               intel_encoder->hpd_pin =
+                       intel_sdvo->is_sdvob ?  HPD_SDVO_B : HPD_SDVO_C;
+       }
+
        /*
         * Cloning SDVO with anything is often impossible, since the SDVO
         * encoder can request a special input timing mode. And even if that's
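
The reordered LVDS mode probing always reads the EDID modes first and then appends the VBT mode; as the comment notes, drm_mode_probed_add() links a new mode in at the head of probed_modes, so adding the VBT mode last makes it the first entry and lets SDVO->LVDS transcoders that cannot cope with the EDID mode prefer the VBT timing. A tiny sketch of why adding last means listed first under head insertion; the list type here is generic, not the drm structures.

#include <stdio.h>

struct mode {
        const char *name;
        struct mode *next;
};

/* Head insertion, mirroring how drm_mode_probed_add() links a new mode in. */
static void probed_add(struct mode **head, struct mode *m)
{
        m->next = *head;
        *head = m;
}

int main(void)
{
        struct mode edid = { "EDID mode", NULL };
        struct mode vbt  = { "VBT mode",  NULL };
        struct mode *head = NULL;

        probed_add(&head, &edid);   /* added first ... */
        probed_add(&head, &vbt);    /* ... added last, ends up at the head */

        for (struct mode *m = head; m; m = m->next)
                printf("%s\n", m->name);
        return 0;
}
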
index f9889658329bfd1aa613cc595a143511e9f94386..ee66badc8bb63b4b49e61744a40ad213e6330954 100644 (file)
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
 
 static inline void mga_wait_vsync(struct mga_device *mdev)
 {
-       unsigned int count = 0;
+       unsigned long timeout = jiffies + HZ/10;
        unsigned int status = 0;
 
        do {
                status = RREG32(MGAREG_Status);
-               count++;
-       } while ((status & 0x08) && (count < 250000));
-       count = 0;
+       } while ((status & 0x08) && time_before(jiffies, timeout));
+       timeout = jiffies + HZ/10;
        status = 0;
        do {
                status = RREG32(MGAREG_Status);
-               count++;
-       } while (!(status & 0x08) && (count < 250000));
+       } while (!(status & 0x08) && time_before(jiffies, timeout));
 }
 
 static inline void mga_wait_busy(struct mga_device *mdev)
 {
-       unsigned int count = 0;
+       unsigned long timeout = jiffies + HZ;
        unsigned int status = 0;
        do {
                status = RREG8(MGAREG_Status + 2);
-               count++;
-       } while ((status & 0x01) && (count < 500000));
+       } while ((status & 0x01) && time_before(jiffies, timeout));
 }
 
 /*
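
The polling loops above are converted from fixed iteration counts (count < 250000 and count < 500000) to wall-clock deadlines: jiffies + HZ/10 bounds the vsync wait to roughly 100 ms and jiffies + HZ bounds the busy wait to about a second, independent of CPU speed, with time_before() handling jiffies wrap-around. A standalone sketch of the same deadline-poll pattern, using clock_gettime() as an assumed stand-in for jiffies:

#include <stdio.h>
#include <time.h>

static double now_seconds(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* Stand-in for reading a hardware status register; always reports "busy". */
static unsigned int read_status(void) { return 0x08; }

int main(void)
{
        double timeout = now_seconds() + 0.1;   /* ~HZ/10, i.e. a 100 ms deadline */
        unsigned int status;

        /* Poll until the busy bit clears or the deadline passes. */
        do {
                status = read_status();
        } while ((status & 0x08) && now_seconds() < timeout);

        printf("%s\n", (status & 0x08) ? "timed out while busy" : "idle");
        return 0;
}
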
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_REMHEADCTL_CLKDIS;
-               WREG_DAC(MGA1064_REMHEADCTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                /* select PLL Set C */
                tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(500);
 
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_VREF_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~0x04;
-               WREG_DAC(MGA1064_VREF_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(50);
 
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
                tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
                tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
-               WREG_DAC(MGA1064_REMHEADCTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                /* reset dotclock rate bit */
                WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                vcount = RREG8(MGAREG_VCOUNT);
 
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+       WREG8(DAC_DATA, tmp);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 
        WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
        tmp = RREG8(DAC_DATA);
-       WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+       WREG8(DAC_DATA, tmp & ~0x40);
 
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
        WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        udelay(500);
 
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
        tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
        tmp = RREG8(DAC_DATA);
-       WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+       WREG8(DAC_DATA, tmp | 0x40);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        return 0;
 }
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+               WREG8(DAC_DATA, tmp);
 
                tmp = RREG8(MGAREG_MEM_MISC_READ);
                tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(500);
 
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
                tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                vcount = RREG8(MGAREG_VCOUNT);
 
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_REMHEADCTL_CLKDIS;
-       WREG_DAC(MGA1064_REMHEADCTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
        tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        udelay(500);
 
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
        WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
 }
 
-
+/*
+   This is how the framebuffer base address is stored in g200 cards:
+   * Assume @offset is the gpu_addr variable of the framebuffer object
+   * Then addr is the number of _pixels_ (not bytes) from the start of
+     VRAM to the first pixel we want to display. (divided by 2 for 32bit
+     framebuffers)
+   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+   addr<20> -> CRTCEXT0<6>
+   addr<19-16> -> CRTCEXT0<3-0>
+   addr<15-8> -> CRTCC<7-0>
+   addr<7-0> -> CRTCD<7-0>
+   CRTCEXT0 has to be programmed last to trigger an update and make the
+   new addr variable take effect.
+ */
 void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
        struct mga_device *mdev = crtc->dev->dev_private;
        u32 addr;
        int count;
+       u8 crtcext0;
 
        while (RREG8(0x1fda) & 0x08);
        while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
        count = RREG8(MGAREG_VCOUNT) + 2;
        while (RREG8(MGAREG_VCOUNT) < count);
 
-       addr = offset >> 2;
+       WREG8(MGAREG_CRTCEXT_INDEX, 0);
+       crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+       crtcext0 &= 0xB0;
+       addr = offset / 8;
+       /* Can't store addresses any higher than that...
+          but we also don't have more than 16MB of memory, so it should be fine. */
+       WARN_ON(addr > 0x1fffff);
+       crtcext0 |= (!!(addr & (1<<20)))<<6;
        WREG_CRT(0x0d, (u8)(addr & 0xff));
        WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
-       WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+       WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
 }
 
 
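
The comment above documents how the start address is sliced across the CRT registers; the code keeps the unrelated CRTCEXT0 bits via the 0xB0 mask, puts addr<20> into CRTCEXT0<6>, addr<19:16> into CRTCEXT0<3:0>, and the lower two bytes into CRTCC and CRTCD. A small sketch of that bit slicing with the register writes replaced by prints; the offset and the read-back CRTCEXT0 value are made-up example numbers.

#include <stdio.h>

int main(void)
{
        unsigned long offset = 0x00400000;              /* example byte offset into VRAM */
        unsigned long addr = offset / 8;                /* same scaling as the code above */
        unsigned char crtcext0_old = 0xff;              /* assumed read-back register value */
        unsigned char crtcext0 = crtcext0_old & 0xB0;   /* preserve the unrelated bits */

        crtcext0 |= (!!(addr & (1UL << 20))) << 6;      /* addr<20>    -> CRTCEXT0<6>   */
        crtcext0 |= (addr >> 16) & 0x0f;                /* addr<19:16> -> CRTCEXT0<3:0> */

        printf("CRTCD  (0x0d) = 0x%02lx\n", addr & 0xff);         /* addr<7:0>   */
        printf("CRTCC  (0x0c) = 0x%02lx\n", (addr >> 8) & 0xff);  /* addr<15:8>  */
        printf("CRTCEXT0      = 0x%02x\n", crtcext0);             /* written last to latch */
        return 0;
}
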
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
        for (i = 0; i < sizeof(dacvalue); i++) {
-               if ((i <= 0x03) ||
-                   (i == 0x07) ||
-                   (i == 0x0b) ||
-                   (i == 0x0f) ||
-                   ((i >= 0x13) && (i <= 0x17)) ||
+               if ((i <= 0x17) ||
                    (i == 0x1b) ||
                    (i == 0x1c) ||
                    ((i >= 0x1f) && (i <= 0x29)) ||
@@ -1020,13 +1034,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                        else
                                hi_pri_lvl = 5;
 
-                       WREG8(0x1fde, 0x06);
-                       WREG8(0x1fdf, hi_pri_lvl);
+                       WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+                       WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
                } else {
+                       WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
                        if (mdev->reg_1e24 >= 0x01)
-                               WREG8(0x1fdf, 0x03);
+                               WREG8(MGAREG_CRTCEXT_DATA, 0x03);
                        else
-                               WREG8(0x1fdf, 0x04);
+                               WREG8(MGAREG_CRTCEXT_DATA, 0x04);
                }
        }
        return 0;
index 955af122c3a68dc06eb0cea0360e746447416966..a36e64e98ef372a91566da1d6fb0fff6ebef400f 100644 (file)
@@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-               device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xce:
@@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-               device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc8:
index d0817d94454ceb0d17536995e0c053b8585fdf72..f02fd9f443fff3e1178b40cedda50ee4e9ba5461 100644 (file)
@@ -50,11 +50,16 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
 {
        const u32 doff = (or * 0x800);
        int load = -EINVAL;
+       nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
+       nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
        nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
-       udelay(9500);
+       mdelay(9);
+       udelay(500);
        nv_wr32(priv, 0x61a00c + doff, 0x80000000);
        load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
        nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+       nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
+       nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
        return load;
 }
 
index 0d36bdc51417eb65a234c5d901195ddabbef2528..7fdade6e604da6918fd16735baf0ba2b0c0d4d62 100644 (file)
@@ -55,6 +55,10 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
        nv_wr32(priv, 0x616510 + hoff, 0x00000000);
        nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
 
+       nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+       nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+       nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
        /* ??? */
        nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
        nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
index ddaeb55729038ee4572b17c78c6ec94198ac0280..e9b8217d00759174cabda39655a79fe098d5dbbf 100644 (file)
@@ -40,8 +40,8 @@
  * FIFO channel objects
  ******************************************************************************/
 
-void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+static void
+nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
 {
        struct nouveau_bar *bar = nouveau_bar(priv);
        struct nouveau_gpuobj *cur;
@@ -62,6 +62,14 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
        nv_wr32(priv, 0x002500, 0x00000101);
 }
 
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+       mutex_lock(&nv_subdev(priv)->mutex);
+       nv50_fifo_playlist_update_locked(priv);
+       mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
 static int
 nv50_fifo_context_attach(struct nouveau_object *parent,
                         struct nouveau_object *object)
@@ -487,7 +495,7 @@ nv50_fifo_init(struct nouveau_object *object)
 
        for (i = 0; i < 128; i++)
                nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
-       nv50_fifo_playlist_update(priv);
+       nv50_fifo_playlist_update_locked(priv);
 
        nv_wr32(priv, 0x003200, 0x00000001);
        nv_wr32(priv, 0x003250, 0x00000001);
index 4d4a6b905370b2633b6afb199de3eb3973943ece..46dfa68c47bbd84ca4c30d07955111aab2bb29b5 100644 (file)
@@ -71,6 +71,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
        struct nouveau_gpuobj *cur;
        int i, p;
 
+       mutex_lock(&nv_subdev(priv)->mutex);
        cur = priv->playlist[priv->cur_playlist];
        priv->cur_playlist = !priv->cur_playlist;
 
@@ -87,6 +88,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
        nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
        if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
                nv_error(priv, "playlist update failed\n");
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
@@ -248,9 +250,17 @@ nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
        struct nvc0_fifo_priv *priv = (void *)object->engine;
        struct nvc0_fifo_chan *chan = (void *)object;
        u32 chid = chan->base.chid;
+       u32 mask, engine;
 
        nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
        nvc0_fifo_playlist_update(priv);
+       mask = nv_rd32(priv, 0x0025a4);
+       for (engine = 0; mask && engine < 16; engine++) {
+               if (!(mask & (1 << engine)))
+                       continue;
+               nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
+               mask &= ~(1 << engine);
+       }
        nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
 
        return nouveau_fifo_channel_fini(&chan->base, suspend);
index 9151919fb83152a3c5035299f04fcbbd472b74f9..56192a7242aee51ab1fdbb575b8463a47a41acb7 100644 (file)
@@ -94,11 +94,13 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
        u32 match = (engine << 16) | 0x00000001;
        int i, p;
 
+       mutex_lock(&nv_subdev(priv)->mutex);
        cur = engn->playlist[engn->cur_playlist];
        if (unlikely(cur == NULL)) {
                int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                             0x8000, 0x1000, 0, &cur);
                if (ret) {
+                       mutex_unlock(&nv_subdev(priv)->mutex);
                        nv_error(priv, "playlist alloc failed\n");
                        return;
                }
@@ -122,6 +124,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
        nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
                nv_error(priv, "playlist %d update timeout\n", engine);
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
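
Across the nv50, nvc0 and nve0 FIFOs the playlist update is now serialized with the subdev mutex; nv50 additionally grows a _locked variant so the init path can run the update without taking the lock (presumably because nothing else can race during init), and nve0 drops the mutex before returning on an allocation failure. A tiny sketch of the wrapper-plus-_locked pattern using a pthread mutex; the names and the playlist flip are illustrative only.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t playlist_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cur_playlist;

/* Body that assumes the caller already holds the lock. */
static void playlist_update_locked(void)
{
        cur_playlist = !cur_playlist;           /* flip the double-buffered playlist */
        printf("programmed playlist %d\n", cur_playlist);
}

/* Public entry point: takes the lock, does the work, drops it. */
static void playlist_update(void)
{
        pthread_mutex_lock(&playlist_mutex);
        playlist_update_locked();
        pthread_mutex_unlock(&playlist_mutex);
}

int main(void)
{
        playlist_update_locked();   /* e.g. a single-threaded init path */
        playlist_update();          /* runtime path, serialized against other updaters */
        return 0;
}
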
index 0a393f7f055fedafc0fc8798a462ad7290c3e79d..5a5961b6a6a3b28f3775b035c03d6f6f74af25d5 100644 (file)
@@ -218,7 +218,7 @@ struct nv04_display_class {
 #define NV50_DISP_DAC_PWR_STATE                                      0x00000040
 #define NV50_DISP_DAC_PWR_STATE_ON                                   0x00000000
 #define NV50_DISP_DAC_PWR_STATE_OFF                                  0x00000040
-#define NV50_DISP_DAC_LOAD                                           0x0002000c
+#define NV50_DISP_DAC_LOAD                                           0x00020100
 #define NV50_DISP_DAC_LOAD_VALUE                                     0x00000007
 
 #define NV50_DISP_PIOR_MTHD                                          0x00030000
index c300b5e7b67048550c4d019bc94b31dc9c0ac6a9..c434d398d16f0302bec862c95b6421cb8e505652 100644 (file)
@@ -1940,8 +1940,8 @@ init_zm_mask_add(struct nvbios_init *init)
        trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
        init->offset += 13;
 
-       data  =  init_rd32(init, addr) & mask;
-       data |= ((data + add) & ~mask);
+       data =  init_rd32(init, addr);
+       data = (data & mask) | ((data + add) & ~mask);
        init_wr32(init, addr, data);
 }
 
index e4940fb166e8ead8280d7d4f4ee9659aa8b3d133..fb794e997fbccdeef0f03a460f0112766d1cc8fd 100644 (file)
@@ -29,7 +29,6 @@
 struct nvc0_ltcg_priv {
        struct nouveau_ltcg base;
        u32 part_nr;
-       u32 part_mask;
        u32 subp_nr;
        struct nouveau_mm tags;
        u32 num_tags;
@@ -105,8 +104,6 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
 
        /* wait until it's finished with clearing */
        for (p = 0; p < priv->part_nr; ++p) {
-               if (!(priv->part_mask & (1 << p)))
-                       continue;
                for (i = 0; i < priv->subp_nr; ++i)
                        nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
        }
@@ -121,6 +118,8 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
        int ret;
 
        nv_wr32(priv, 0x17e8d8, priv->part_nr);
+       if (nv_device(pfb)->card_type >= NV_E0)
+               nv_wr32(priv, 0x17e000, priv->part_nr);
 
        /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
        priv->num_tags = (pfb->ram.size >> 17) / 4;
@@ -167,16 +166,20 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 {
        struct nvc0_ltcg_priv *priv;
        struct nouveau_fb *pfb = nouveau_fb(parent);
-       int ret;
+       u32 parts, mask;
+       int ret, i;
 
        ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->part_nr = nv_rd32(priv, 0x022438);
-       priv->part_mask = nv_rd32(priv, 0x022554);
-
+       parts = nv_rd32(priv, 0x022438);
+       mask = nv_rd32(priv, 0x022554);
+       for (i = 0; i < parts; i++) {
+               if (!(mask & (1 << i)))
+                       priv->part_nr++;
+       }
        priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
        nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
index 7bf22d4a3d9679c1553e0b9846802abf11c729be..f17dc2ab03ecd25a762e52840f02b8f4652345d4 100644 (file)
@@ -638,17 +638,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        }
 
        s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
-       if (s->event) {
-               struct drm_pending_vblank_event *e = s->event;
-               struct timeval now;
-
-               do_gettimeofday(&now);
-               e->event.sequence = 0;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
-               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
+       if (s->event)
+               drm_send_vblank_event(dev, -1, s->event);
 
        list_del(&s->head);
        if (ps)
index 46c152ff0a80f41a102a332a2378867cf0e88f3a..383f4e6ea9d164c59509c7788666cf89fb6abbe0 100644 (file)
@@ -453,18 +453,32 @@ nouveau_do_suspend(struct drm_device *dev)
        NV_INFO(drm, "evicting buffers...\n");
        ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 
+       NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+       if (drm->cechan) {
+               ret = nouveau_channel_idle(drm->cechan);
+               if (ret)
+                       return ret;
+       }
+
+       if (drm->channel) {
+               ret = nouveau_channel_idle(drm->channel);
+               if (ret)
+                       return ret;
+       }
+
+       NV_INFO(drm, "suspending client object trees...\n");
        if (drm->fence && nouveau_fence(drm)->suspend) {
                if (!nouveau_fence(drm)->suspend(drm))
                        return -ENOMEM;
        }
 
-       NV_INFO(drm, "suspending client object trees...\n");
        list_for_each_entry(cli, &drm->clients, head) {
                ret = nouveau_client_fini(&cli->base, true);
                if (ret)
                        goto fail_client;
        }
 
+       NV_INFO(drm, "suspending kernel object tree...\n");
        ret = nouveau_client_fini(&drm->client.base, true);
        if (ret)
                goto fail_client;
@@ -514,17 +528,18 @@ nouveau_do_resume(struct drm_device *dev)
 
        nouveau_agp_reset(drm);
 
-       NV_INFO(drm, "resuming client object trees...\n");
+       NV_INFO(drm, "resuming kernel object tree...\n");
        nouveau_client_init(&drm->client.base);
        nouveau_agp_init(drm);
 
+       NV_INFO(drm, "resuming client object trees...\n");
+       if (drm->fence && nouveau_fence(drm)->resume)
+               nouveau_fence(drm)->resume(drm);
+
        list_for_each_entry(cli, &drm->clients, head) {
                nouveau_client_init(&cli->base);
        }
 
-       if (drm->fence && nouveau_fence(drm)->resume)
-               nouveau_fence(drm)->resume(drm);
-
        nouveau_run_vbios_init(dev);
        nouveau_pm_resume(dev);
 
index ebf0a683305ec18aad76a31822b96334c997f53f..dd5e01f89f284cd8aaf9df8b830816092055dc8b 100644 (file)
@@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 {
        struct nv50_disp *disp = nv50_disp(encoder->dev);
        int ret, or = nouveau_encoder(encoder)->or;
-       u32 load = 0;
+       u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
+       if (load == 0)
+               load = 340;
 
        ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
        if (ret || load != 7)
index 9c53c25e5201763a4673734d5ce32d72043f5a35..826586ffbe835d94983f779b372149d92143b863 100644 (file)
@@ -649,6 +649,9 @@ static void pdev_shutdown(struct platform_device *device)
 
 static int pdev_probe(struct platform_device *device)
 {
+       if (omapdss_is_initialized() == false)
+               return -EPROBE_DEFER;
+
        DBG("%s", device->name);
        return drm_platform_init(&omap_drm_driver, device);
 }
index 2f1a57e11140adac1e8850abdd348092da07f083..d6c12796023cd654e95196bc9d55ce18aabdb114 100644 (file)
@@ -4,6 +4,7 @@ config DRM_QXL
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
+       select FB_DEFERRED_IO
         select DRM_KMS_HELPER
         select DRM_TTM
        help
index 08b0823c93d526405d506aca4849f2c83a9d9eb5..f86771481317b77d565675c956d77fdcb22f2789 100644 (file)
@@ -277,7 +277,7 @@ out_unref:
        return 0;
 }
 
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
 {
        int irq_num;
        long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
 
        mutex_lock(&qdev->async_io_mutex);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);
-
-
        if (qdev->last_sent_io_cmd > irq_num) {
-               ret = wait_event_interruptible(qdev->io_cmd_event,
-                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num);
-               if (ret)
+               if (intr)
+                       ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+                                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+               else
+                       ret = wait_event_timeout(qdev->io_cmd_event,
+                                                atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+               /* 0 is timeout, just bail, the "hw" has gone away */
+               if (ret <= 0)
                        goto out;
                irq_num = atomic_read(&qdev->irq_received_io_cmd);
        }
        outb(val, addr);
        qdev->last_sent_io_cmd = irq_num + 1;
-       ret = wait_event_interruptible(qdev->io_cmd_event,
-                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+       if (intr)
+               ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+                                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+       else
+               ret = wait_event_timeout(qdev->io_cmd_event,
+                                        atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
+       if (ret > 0)
+               ret = 0;
        mutex_unlock(&qdev->async_io_mutex);
        return ret;
 }
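
wait_for_io_cmd_user() now bounds both waits with a 5*HZ timeout, picks the interruptible variant only when the caller asks for it, and normalizes the wait_event_timeout()-style return codes (positive means the condition was met, 0 means timeout, negative means interrupted): anything non-negative collapses to 0 and only a signal propagates as an error. A userspace sketch of just that return normalization; fake_wait() simulates the outcome and is not part of the qxl API.

#include <stdio.h>

/* Simulated wait: >0 condition met, 0 timed out, <0 interrupted by a signal. */
static long fake_wait(long outcome) { return outcome; }

static int wait_for_cmd(long outcome)
{
        long ret = fake_wait(outcome);

        if (ret <= 0)                   /* 0 is timeout: bail, the "hw" has gone away */
                goto out;

        /* ... the port write and the wait for its completion would go here ... */
        ret = fake_wait(outcome);
out:
        if (ret > 0)                    /* condition met: report success as 0 */
                ret = 0;
        return (int)ret;
}

int main(void)
{
        printf("success: %d, timeout: %d, interrupted: %d\n",
               wait_for_cmd(1), wait_for_cmd(0), wait_for_cmd(-512));
        return 0;
}
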
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
        int ret;
 
 restart:
-       ret = wait_for_io_cmd_user(qdev, val, port);
+       ret = wait_for_io_cmd_user(qdev, val, port, false);
        if (ret == -ERESTARTSYS)
                goto restart;
 }
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
        mutex_lock(&qdev->update_area_mutex);
        qdev->ram_header->update_area = *area;
        qdev->ram_header->update_surface = surface_id;
-       ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+       ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
        mutex_unlock(&qdev->update_area_mutex);
        return ret;
 }
index fcfd4436ceedd50f335afdc84edac0001036e2d8..823d29e926ec0aa6c9715b4cb93448b3c4e90215 100644 (file)
@@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        int inc = 1;
 
        qobj = gem_to_qxl_bo(qxl_fb->obj);
-       if (qxl_fb != qdev->active_user_framebuffer) {
-               DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
-                       __func__, qxl_fb, qdev->active_user_framebuffer);
-       }
+       /* if we aren't primary surface ignore this */
+       if (!qobj->is_primary)
+               return 0;
+
        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
@@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
                                               mode->hdisplay,
                                               mode->vdisplay);
        }
-       qdev->mode_set = true;
        return 0;
 }
 
@@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
 {
        struct drm_gem_object *obj;
        struct qxl_framebuffer *qxl_fb;
-       struct qxl_device *qdev = dev->dev_private;
        int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
@@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
                return NULL;
        }
 
-       if (qdev->active_user_framebuffer) {
-               DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
-                        __func__,
-                        qdev->active_user_framebuffer, qxl_fb);
-       }
-       qdev->active_user_framebuffer = qxl_fb;
-
        return &qxl_fb->base;
 }
 
index 52b582c211da9dc8e8ff4f28dbd52c8a0242c23a..43d06ab28a21195c5c81e03bd3f1b1b838381c80 100644 (file)
@@ -255,12 +255,6 @@ struct qxl_device {
        struct qxl_gem          gem;
        struct qxl_mode_info mode_info;
 
-       /*
-        * last created framebuffer with fb_create
-        * only used by debugfs dumbppm
-        */
-       struct qxl_framebuffer *active_user_framebuffer;
-
        struct fb_info                  *fbdev_info;
        struct qxl_framebuffer  *fbdev_qfb;
        void *ram_physical;
@@ -270,7 +264,6 @@ struct qxl_device {
        struct qxl_ring *cursor_ring;
 
        struct qxl_ram_header *ram_header;
-       bool mode_set;
 
        bool primary_created;
 
index 04b64f9cbfdb94b2c6dc1fa5e09a2df1c2b0b2cc..a4b71b25fa5366c948f8447e6c7b6f0808b4aab4 100644 (file)
@@ -151,7 +151,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
                struct qxl_bo *cmd_bo;
                int release_type;
                struct drm_qxl_command *commands =
-                       (struct drm_qxl_command *)execbuffer->commands;
+                       (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
 
                if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
                                       sizeof(user_cmd)))
@@ -193,7 +193,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 
                for (i = 0 ; i < user_cmd.relocs_num; ++i) {
                        if (DRM_COPY_FROM_USER(&reloc,
-                                              &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+                                              &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
                                               sizeof(reloc))) {
                                qxl_bo_list_unreserve(&reloc_list, true);
                                qxl_release_unreserve(qdev, release);
@@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
                goto out;
 
        if (!qobj->pin_count) {
+               qxl_ttm_placement_from_domain(qobj, qobj->type);
                ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                                      true, false);
                if (unlikely(ret))
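
A note on the (uintptr_t) casts in the qxl_ioctl.c hunks above: DRM ioctl structures keep user-space pointers in 64-bit integer fields so the struct layout is identical for 32- and 64-bit user space, and converting such a field back to a pointer goes through uintptr_t to make the narrowing explicit and warning-free on 32-bit kernels. A minimal stand-alone sketch of the idiom (fake_execbuffer and commands_ptr are illustrative names, not the qxl API):

#include <stdint.h>

struct fake_execbuffer {
        uint64_t commands;      /* user-space address carried as a plain u64 */
};

static void *commands_ptr(const struct fake_execbuffer *eb)
{
        /* narrow through uintptr_t first, then convert to a pointer */
        return (void *)(uintptr_t)eb->commands;
}
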
index 85127ed24cfd3481b72428b84cd54da5c12134dc..e27ce2a907cf798c7027c15abbd270d166ef9dfa 100644 (file)
@@ -128,12 +128,13 @@ int qxl_device_init(struct qxl_device *qdev,
 
        qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
        qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
-       DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
-                (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+       DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+                (unsigned long long)qdev->vram_base,
+                (unsigned long long)pci_resource_end(pdev, 0),
                 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
                 (int)pci_resource_len(pdev, 0) / 1024,
-                (void *)qdev->surfaceram_base,
-                (void *)pci_resource_end(pdev, 1),
+                (unsigned long long)qdev->surfaceram_base,
+                (unsigned long long)pci_resource_end(pdev, 1),
                 (int)qdev->surfaceram_size / 1024 / 1024,
                 (int)qdev->surfaceram_size / 1024);
 
index 6d6fdb3ba0d07d859b8c0fba223c0fe8193b1e46..d5df8fd1021755ad1ca39543bc0218ea9531b5ae 100644 (file)
@@ -1811,12 +1811,9 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       radeon_crtc->in_mode_set = true;
-
        /* disable crtc pair power gating before programming */
        if (ASIC_IS_DCE6(rdev))
                atombios_powergate_crtc(crtc, ATOM_DISABLE);
@@ -1827,11 +1824,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
        atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        atombios_lock_crtc(crtc, ATOM_DISABLE);
-       radeon_crtc->in_mode_set = false;
 }
 
 static void atombios_crtc_disable(struct drm_crtc *crtc)
index 44a7da66e0810ee489b46c4c1c500af639eeb848..8406c8251fbfd074078fa79cb0e3f3806fb49779 100644 (file)
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
 int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
                if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                   radeon_audio)
+                   radeon_audio &&
+                   !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
                        return ATOM_ENCODER_MODE_HDMI;
                else if (radeon_connector->use_digital)
                        return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
                if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                   radeon_audio)
+                   radeon_audio &&
+                   !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
                        return ATOM_ENCODER_MODE_HDMI;
                else
                        return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
                else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                        radeon_audio)
+                        radeon_audio &&
+                        !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
                        return ATOM_ENCODER_MODE_HDMI;
                else
                        return ATOM_ENCODER_MODE_DVI;
index 105bafb6c29d8aab2a6d22018d6a48c86a805cf4..0f89ce3d02b90d7640c47a9b42f137001c76bcaf 100644 (file)
@@ -2343,11 +2343,13 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
        u32 crtc_enabled, tmp, frame_count, blackout;
        int i, j;
 
-       save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
-       save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+       if (!ASIC_IS_NODCE(rdev)) {
+               save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+               save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 
-       /* disable VGA render */
-       WREG32(VGA_RENDER_CONTROL, 0);
+               /* disable VGA render */
+               WREG32(VGA_RENDER_CONTROL, 0);
+       }
        /* blank the display controllers */
        for (i = 0; i < rdev->num_crtc; i++) {
                crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
@@ -2438,8 +2440,11 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)rdev->mc.vram_start);
        }
-       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
-       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+               WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+       }
 
        /* unlock regs and wait for update */
        for (i = 0; i < rdev->num_crtc; i++) {
@@ -2499,10 +2504,12 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                        }
                }
        }
-       /* Unlock vga access */
-       WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
-       mdelay(1);
-       WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+       if (!ASIC_IS_NODCE(rdev)) {
+               /* Unlock vga access */
+               WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+               mdelay(1);
+               WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+       }
 }
 
 void evergreen_mc_program(struct radeon_device *rdev)
@@ -3405,8 +3412,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
                rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        } else {
                /* size in MB on evergreen/cayman/tn */
-               rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-               rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+               rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+               rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        }
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        r700_vram_gtt_location(rdev, &rdev->mc);
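
A note on the 1024ULL change above (the same fix reappears for SI in si_mc_init further down): CONFIG_MEMSIZE reports VRAM in megabytes, and multiplying a 32-bit readout by 1024 * 1024 in plain int arithmetic wraps once the board carries 4 GB or more, so one operand is forced to 64 bits before the multiply. A small sketch of the same fix (vram_bytes and mem_size_mb are illustrative names, not radeon symbols):

#include <stdint.h>

static uint64_t vram_bytes(uint32_t mem_size_mb)
{
        /* promote before multiplying so the product is computed in 64 bits */
        return (uint64_t)mem_size_mb * 1024ULL * 1024ULL;
}
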
@@ -4747,6 +4754,12 @@ static int evergreen_startup(struct radeon_device *rdev)
                rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -4916,10 +4929,6 @@ int evergreen_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r = radeon_irq_kms_init(rdev);
-       if (r)
-               return r;
-
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -4992,8 +5001,7 @@ void evergreen_fini(struct radeon_device *rdev)
 
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 {
-       u32 link_width_cntl, speed_cntl, mask;
-       int ret;
+       u32 link_width_cntl, speed_cntl;
 
        if (radeon_pcie_gen2 == 0)
                return;
@@ -5008,11 +5016,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
        if (ASIC_IS_X2(rdev))
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & DRM_PCIE_SPEED_50))
+       if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+               (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
index b4ab8ceb16545d115d6b39428e54c42a6f6ce1d2..ed7c8a7680929e5589fe84daa70f36d2e3bce74a 100644 (file)
@@ -154,19 +154,18 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-       u32 base_rate = 48000;
+       u32 base_rate = 24000;
 
        if (!dig || !dig->afmt)
                return;
 
-       /* XXX: properly calculate this */
        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
-       WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
-       WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
+       WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+       WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
        WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
 }
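
A note on the DTO writes in this hunk: per the surrounding comment, PHASE/MODULE encode the ratio of the 24 MHz reference to the pixel clock as numerator/denominator, so scaling both by the same factor leaves the ratio intact. The numerator is in fact unchanged here (48000 * 50 == 24000 * 100); the rewrite matches the 24 MHz reference named in the comment and drops the 0xffffff masking. A sketch of the ratio bookkeeping, assuming both rates are expressed in the same unit (helper names are illustrative):

static void audio_dto_ratio(unsigned int base_rate, unsigned int pixclk,
                            unsigned int *phase, unsigned int *module)
{
        /* numerator / denominator of base_rate / pixclk, scaled identically */
        *phase  = base_rate * 100;
        *module = pixclk * 100;
}
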
 
index 7969c0c8ec200be015909a3dada502c0b09af024..84583302b08162058d3ce0c2bfed31ddf1b2b44e 100644 (file)
@@ -2025,6 +2025,12 @@ static int cayman_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2190,10 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r = radeon_irq_kms_init(rdev);
-       if (r)
-               return r;
-
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
index 4973bff37fec8ae9d5974a6f8e4f63811ee914f5..d0314ecbd7c18ecbd0f250b48efb3023eaf2d3ae 100644 (file)
@@ -3869,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r100_irq_set(rdev);
        rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -4022,9 +4028,6 @@ int r100_init(struct radeon_device *rdev)
        r100_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index c60350e6872ddebdb5bc1bd923f6b2d4d6bc59fd..b9b776f1e5822cac4a2bc563bcbea886097ce9e6 100644 (file)
@@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -1514,9 +1520,6 @@ int r300_init(struct radeon_device *rdev)
        r300_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index 865e2c9980dbd5b2245a40ed23cdbb19af1044d5..60170ea5e3a228c0483c18f0fd1652b30ec7760b 100644 (file)
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
                OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
                for (i = 0; i < nr; ++i) {
-                       if (DRM_COPY_FROM_USER_UNCHECKED
+                       if (DRM_COPY_FROM_USER
                            (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
                                DRM_ERROR("copy cliprect faulted\n");
                                return -EFAULT;
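
A note on the hunk above: the cliprect copy switches from the unchecked variant to the checked one, i.e. the user pointer range is validated before any bytes are copied rather than trusted outright. A generic sketch of the difference (range_ok and checked_copy are illustrative stand-ins, not the DRM macros):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

extern bool range_ok(const void *user_ptr, size_t len);  /* assumed access check */

static int checked_copy(void *dst, const void *user_src, size_t len)
{
        if (!range_ok(user_src, len))
                return -1;              /* reject bogus user pointers */
        memcpy(dst, user_src, len);     /* raw copy happens only after the check */
        return 0;
}
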
index 6fce2eb4dd16b3269625c43ec767c634685ec63a..4e796ecf9ea4770e2388a56032cd135f5e861605 100644 (file)
@@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev)
        if (r) {
                return r;
        }
-       r = radeon_irq_kms_init(rdev);
-       if (r) {
-               return r;
-       }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
index f795a4e092cb724374e5440775f37a37a87373d1..e1aece73b370c1ec9d6d9f86273caad0cbb14d36 100644 (file)
@@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -295,9 +301,6 @@ int r520_init(struct radeon_device *rdev)
        rv515_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index 1a08008c978bc413fe6e4ac5161c61053ed62c93..0e5341695922b504298b103df17c936e95a34d0a 100644 (file)
@@ -1046,6 +1046,24 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+       uint32_t r;
+
+       WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+       r = RREG32(R_0028FC_MC_DATA);
+       WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+       return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+       WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+               S_0028F8_MC_IND_WR_EN(1));
+       WREG32(R_0028FC_MC_DATA, v);
+       WREG32(R_0028F8_MC_INDEX, 0x7F);
+}
+
 static void r600_mc_program(struct radeon_device *rdev)
 {
        struct rv515_mc_save save;
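
A note on rs780_mc_rreg()/rs780_mc_wreg() above: memory-controller registers on these parts are reached indirectly, by writing the target register number (plus a write-enable bit for stores) into MC_INDEX and then accessing MC_DATA. A stand-alone sketch of the index/data idiom, using the field values visible in the hunk (mmio_read32/mmio_write32 are hypothetical MMIO helpers, not RREG32/WREG32):

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t reg);
extern void mmio_write32(uint32_t reg, uint32_t val);

#define MC_INDEX        0x28F8
#define MC_DATA         0x28FC
#define MC_ADDR_MASK    0x1FF           /* MC_IND_ADDR field */
#define MC_WR_EN        (1u << 9)       /* MC_IND_WR_EN bit */

static uint32_t mc_indirect_read(uint32_t reg)
{
        mmio_write32(MC_INDEX, reg & MC_ADDR_MASK);
        return mmio_read32(MC_DATA);
}

static void mc_indirect_write(uint32_t reg, uint32_t val)
{
        mmio_write32(MC_INDEX, (reg & MC_ADDR_MASK) | MC_WR_EN);
        mmio_write32(MC_DATA, val);
}
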
@@ -1181,6 +1199,8 @@ static int r600_mc_init(struct radeon_device *rdev)
 {
        u32 tmp;
        int chansize, numchan;
+       uint32_t h_addr, l_addr;
+       unsigned long long k8_addr;
 
        /* Get VRAM informations */
        rdev->mc.vram_is_ddr = true;
@@ -1221,7 +1241,30 @@ static int r600_mc_init(struct radeon_device *rdev)
        if (rdev->flags & RADEON_IS_IGP) {
                rs690_pm_info(rdev);
                rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+               if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+                       /* Use K8 direct mapping for fast fb access. */
+                       rdev->fastfb_working = false;
+                       h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+                       l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+                       k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+                       if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+                       {
+                               /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
+                               * memory is present.
+                               */
+                               if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+                                       DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+                                               (unsigned long long)rdev->mc.aper_base, k8_addr);
+                                       rdev->mc.aper_base = (resource_size_t)k8_addr;
+                                       rdev->fastfb_working = true;
+                               }
+                       }
+               }
        }
+
        radeon_update_bandwidth_info(rdev);
        return 0;
 }
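
A note on the K8 address assembly above: the upper and lower halves of the 64-bit direct-mapping base come from two 32-bit registers, and the high half must be widened before it is shifted, since shifting a 32-bit value left by 32 is undefined. A minimal sketch (combine_hi_lo is an illustrative helper):

#include <stdint.h>

static uint64_t combine_hi_lo(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;       /* widen first, then shift */
}
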
@@ -3202,6 +3245,12 @@ static int r600_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3356,10 +3405,6 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r = radeon_irq_kms_init(rdev);
-       if (r)
-               return r;
-
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -4631,8 +4676,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
 {
        u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
        u16 link_cntl2;
-       u32 mask;
-       int ret;
 
        if (radeon_pcie_gen2 == 0)
                return;
@@ -4651,11 +4694,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
        if (rdev->family <= CHIP_R600)
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & DRM_PCIE_SPEED_50))
+       if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+               (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
index 47f180a79352a48d5eb083df80181f0b7aab597f..456750a0daa5c98409e040d6c3ed4731e533a0a1 100644 (file)
@@ -232,7 +232,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 base_rate = 48000;
+       u32 base_rate = 24000;
 
        if (!dig || !dig->afmt)
                return;
@@ -240,7 +240,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
         * doesn't matter which one you use.  Just use the first one.
         */
-       /* XXX: properly calculate this */
        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
@@ -250,13 +249,13 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
                /* according to the reg specs, this should DCE3.2 only, but in
                 * practice it seems to cover DCE3.0 as well.
                 */
-               WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
+               WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
                WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
                WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
        } else {
                /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
-               WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
-                      AUDIO_DTO_MODULE(clock * 100));
+               WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+                      AUDIO_DTO_MODULE(clock / 10));
        }
 }
 
index acb146c06973301fed6654e7b7448983a91fcb08..79df558f8c4088ebe21929034c15bc5fd2c94d16 100644 (file)
 #define        PACKET3_STRMOUT_BASE_UPDATE                     0x72 /* r7xx */
 #define        PACKET3_SURFACE_BASE_UPDATE                     0x73
 
+#define R_000011_K8_FB_LOCATION                 0x11
+#define R_000012_MC_MISC_UMA_CNTL               0x12
+#define   G_000012_K8_ADDR_EXT(x)               (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX                      0x28F8
+#define        S_0028F8_MC_IND_ADDR(x)                 (((x) & 0x1FF) << 0)
+#define        C_0028F8_MC_IND_ADDR                    0xFFFFFE00
+#define        S_0028F8_MC_IND_WR_EN(x)                (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA                        0x28FC
 
 #define        R_008020_GRBM_SOFT_RESET                0x8020
 #define                S_008020_SOFT_RESET_CP(x)               (((x) & 1) << 0)
index 1442ce765d48a74be071a89b9e7f30beee0332d7..142ce6cc69f5e3564299102ceaa2982adce628e7 100644 (file)
@@ -1694,6 +1694,7 @@ struct radeon_device {
        int num_crtc; /* number of crtcs */
        struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
        bool audio_enabled;
+       bool has_uvd;
        struct r600_audio audio_status; /* audio stuff */
        struct notifier_block acpi_nb;
        /* only one userspace can use Hyperz features or CMASK at a time */
@@ -1838,6 +1839,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
                             (rdev->flags & RADEON_IS_IGP))
 #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
+#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
 
 /*
  * BIOS helpers.
index 6417132c50cf5bf8df186225cc8233d182188197..a2802b47ee958e230d754f105214070f91ef1f23 100644 (file)
@@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
                rdev->mc_rreg = &rs600_mc_rreg;
                rdev->mc_wreg = &rs600_mc_wreg;
        }
+       if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+               rdev->mc_rreg = &rs780_mc_rreg;
+               rdev->mc_wreg = &rs780_mc_wreg;
+       }
        if (rdev->family >= CHIP_R600) {
                rdev->pciep_rreg = &r600_pciep_rreg;
                rdev->pciep_wreg = &r600_pciep_wreg;
@@ -1935,6 +1939,8 @@ int radeon_asic_init(struct radeon_device *rdev)
        else
                rdev->num_crtc = 2;
 
+       rdev->has_uvd = false;
+
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
@@ -1999,16 +2005,22 @@ int radeon_asic_init(struct radeon_device *rdev)
        case CHIP_RV635:
        case CHIP_RV670:
                rdev->asic = &r600_asic;
+               if (rdev->family == CHIP_R600)
+                       rdev->has_uvd = false;
+               else
+                       rdev->has_uvd = true;
                break;
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->asic = &rs780_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
                rdev->asic = &rv770_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_CEDAR:
        case CHIP_REDWOOD:
@@ -2021,11 +2033,13 @@ int radeon_asic_init(struct radeon_device *rdev)
                else
                        rdev->num_crtc = 6;
                rdev->asic = &evergreen_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_PALM:
        case CHIP_SUMO:
        case CHIP_SUMO2:
                rdev->asic = &sumo_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_BARTS:
        case CHIP_TURKS:
@@ -2036,27 +2050,37 @@ int radeon_asic_init(struct radeon_device *rdev)
                else
                        rdev->num_crtc = 6;
                rdev->asic = &btc_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_CAYMAN:
                rdev->asic = &cayman_asic;
                /* set num crtcs */
                rdev->num_crtc = 6;
+               rdev->has_uvd = true;
                break;
        case CHIP_ARUBA:
                rdev->asic = &trinity_asic;
                /* set num crtcs */
                rdev->num_crtc = 4;
+               rdev->has_uvd = true;
                break;
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
+       case CHIP_HAINAN:
                rdev->asic = &si_asic;
                /* set num crtcs */
-               if (rdev->family == CHIP_OLAND)
+               if (rdev->family == CHIP_HAINAN)
+                       rdev->num_crtc = 0;
+               else if (rdev->family == CHIP_OLAND)
                        rdev->num_crtc = 2;
                else
                        rdev->num_crtc = 6;
+               if (rdev->family == CHIP_HAINAN)
+                       rdev->has_uvd = false;
+               else
+                       rdev->has_uvd = true;
                break;
        default:
                /* FIXME: not supported yet */
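
A note on the rdev->has_uvd assignments above: instead of repeating per-family checks wherever UVD registers are touched, the capability is decided once per chip at ASIC-init time and the rest of the driver tests the flag (see the guarded UVD_UDEC writes in si.c further down). A tiny sketch of the pattern (all names are illustrative):

#include <stdbool.h>

enum chip_family { CHIP_A, CHIP_B, CHIP_NO_VIDEO };

struct device_caps {
        bool has_uvd;
};

static void caps_init(struct device_caps *caps, enum chip_family family)
{
        /* decide the capability once, rather than re-checking the family later */
        caps->has_uvd = (family != CHIP_NO_VIDEO);
}
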
index 2c87365d345fcf3a5128c9da26dd7b12df6ce757..a72759ede7538390a9360c0764670fcf161018c7 100644 (file)
@@ -347,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev);
 extern void r600_pm_misc(struct radeon_device *rdev);
 extern void r600_pm_init_profile(struct radeon_device *rdev);
 extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
 extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int r600_get_pcie_lanes(struct radeon_device *rdev);
index fa3c56fba294fe1a433b2d7b6a3e1507e357d1ca..061b227dae0c45f88f506bf497c75faa1195aca1 100644 (file)
@@ -244,24 +244,28 @@ static bool ni_read_disabled_bios(struct radeon_device *rdev)
 
        /* enable the rom */
        WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
-       /* Disable VGA mode */
-       WREG32(AVIVO_D1VGA_CONTROL,
-              (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
-               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
-       WREG32(AVIVO_D2VGA_CONTROL,
-              (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
-               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
-       WREG32(AVIVO_VGA_RENDER_CONTROL,
-              (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+       if (!ASIC_IS_NODCE(rdev)) {
+               /* Disable VGA mode */
+               WREG32(AVIVO_D1VGA_CONTROL,
+                      (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(AVIVO_D2VGA_CONTROL,
+                      (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(AVIVO_VGA_RENDER_CONTROL,
+                      (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+       }
        WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
 
        r = radeon_read_bios(rdev);
 
        /* restore regs */
        WREG32(R600_BUS_CNTL, bus_cntl);
-       WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
-       WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
-       WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+               WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+               WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       }
        WREG32(R600_ROM_CNTL, rom_cntl);
        return r;
 }
index a8f6089039896f47f22152746d6baeceb1b07603..189973836cff691ced976818f1bf045ed58e424b 100644 (file)
@@ -94,6 +94,7 @@ static const char radeon_family_name[][16] = {
        "PITCAIRN",
        "VERDE",
        "OLAND",
+       "HAINAN",
        "LAST",
 };
 
@@ -466,23 +467,27 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
+       /* required for EFI mode on macbook2,1 which uses an r5xx asic */
        if (efi_enabled(EFI_BOOT) &&
-           rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+           (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+           (rdev->family < CHIP_R600))
                return false;
 
+       if (ASIC_IS_NODCE(rdev))
+               goto check_memsize;
+
        /* first check CRTCs */
-       if (ASIC_IS_DCE41(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-               if (reg & EVERGREEN_CRTC_MASTER_EN)
-                       return true;
-       } else if (ASIC_IS_DCE4(rdev)) {
-               reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+                       if (rdev->num_crtc >= 4) {
+                               reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+                                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+                       }
+                       if (rdev->num_crtc >= 6) {
+                               reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+                                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+                       }
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_AVIVO(rdev)) {
@@ -499,6 +504,7 @@ bool radeon_card_posted(struct radeon_device *rdev)
                }
        }
 
+check_memsize:
        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
index e38fd559f1abb843b0b1733cfc92f4ddc301802b..eb18bb7af1cc007150871ae814ed6502c36d7c75 100644 (file)
@@ -271,8 +271,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        struct radeon_unpin_work *work;
-       struct drm_pending_vblank_event *e;
-       struct timeval now;
        unsigned long flags;
        u32 update_pending;
        int vpos, hpos;
@@ -328,14 +326,9 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
        radeon_crtc->unpin_work = NULL;
 
        /* wakeup userspace */
-       if (work->event) {
-               e = work->event;
-               e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
-               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
+       if (work->event)
+               drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
        drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
index d33f484ace48792d746e812d81fd84d56ce5d4b8..094e7e5ea39e00fb391e63364c9c2f5978ce9c57 100644 (file)
@@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
 #endif
 
 int radeon_no_wb;
-int radeon_modeset = 1;
+int radeon_modeset = -1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
 int radeon_agpmode = 0;
@@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = {
 
 static int __init radeon_init(void)
 {
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force() && radeon_modeset == -1) {
+               DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+               radeon_modeset = 0;
+       }
+#endif
+       /* set to modesetting by default if not nomodeset */
+       if (radeon_modeset == -1)
+               radeon_modeset = 1;
+
        if (radeon_modeset == 1) {
                DRM_INFO("radeon kernel modesetting enabled.\n");
                driver = &kms_driver;
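
A note on the radeon_modeset change above: the module parameter now defaults to -1 ("auto") and is resolved at init time, disabling KMS when the VGA text console was forced (nomodeset) and enabling it otherwise. A sketch of the tri-state default (text_console_forced is a hypothetical stand-in for vgacon_text_force):

#include <stdbool.h>

extern bool text_console_forced(void);

static int modeset = -1;                /* -1 = auto, 0 = off, 1 = on */

static void resolve_modeset(void)
{
        if (modeset == -1)
                modeset = text_console_forced() ? 0 : 1;
}
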
index 2d91123f2759dd3556310581dda6fa188988e958..36e9803b077d8a5a50a54e0d9e1e70f16b691db7 100644 (file)
@@ -92,6 +92,7 @@ enum radeon_family {
        CHIP_PITCAIRN,
        CHIP_VERDE,
        CHIP_OLAND,
+       CHIP_HAINAN,
        CHIP_LAST,
 };
 
index 6857cb4efb768c932f23d4645c3969fd9c792f18..7cb178a34a0f18c0b50c7cd48a3e151f5ce05b4b 100644 (file)
@@ -1031,11 +1031,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 
 static void radeon_crtc_prepare(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *crtci;
 
-       radeon_crtc->in_mode_set = true;
        /*
        * The hardware wedges sometimes if you reconfigure one CRTC
        * whilst another is running (see fdo bug #24611).
@@ -1046,7 +1044,6 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
 
 static void radeon_crtc_commit(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *crtci;
 
@@ -1057,7 +1054,6 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
                if (crtci->enabled)
                        radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
        }
-       radeon_crtc->in_mode_set = false;
 }
 
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
index 44e579e75fd0950e6a644d5be110687fbab0efae..69ad4fe224c1951eb370f6396b257b85dda7e4da 100644 (file)
@@ -302,7 +302,6 @@ struct radeon_crtc {
        u16 lut_r[256], lut_g[256], lut_b[256];
        bool enabled;
        bool can_tile;
-       bool in_mode_set;
        uint32_t crtc_offset;
        struct drm_gem_object *cursor_bo;
        uint64_t cursor_addr;
index 93f760e27a9200a81b94dfee8ce6d14ac935b49a..6c0ce8915fac9efc399654a8b25e4bd180285d0a 100644 (file)
@@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
-                (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+                (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                                rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
index 73051ce3121ee480a770b3f697289d35e0fd46e0..233a9b9fa1f7a4dde360a30559f902a2d81d0555 100644 (file)
@@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -533,9 +539,6 @@ int rs400_init(struct radeon_device *rdev)
        rs400_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index 46fa1b07c5602388581bed64cc9bc5ea6065a35c..670b555d2ca229c3b39ac46cbbbf8e06e678f25b 100644 (file)
@@ -923,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -1045,9 +1051,6 @@ int rs600_init(struct radeon_device *rdev)
        rs600_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index ab4c86cfd5526f1884a4a7575cd29f0341e0b59b..55880d5962c31b3672dbfe9146df411af89618d1 100644 (file)
@@ -651,6 +651,12 @@ static int rs690_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -774,9 +780,6 @@ int rs690_init(struct radeon_device *rdev)
        rv515_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index ffcba730c57cb3172a4084d696d1cc70e9ec0adf..21c7d7b26e55547da60fa5dbe12818f11ae6f3bd 100644 (file)
@@ -532,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev)
        }
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
@@ -660,9 +666,6 @@ int rv515_init(struct radeon_device *rdev)
        rv515_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
-       if (r)
-               return r;
-       r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
index 83f612a9500ba288269241f4b17fafa26dc9841a..4a62ad2e539944ffaefc73b7169733bfefff86ff 100644 (file)
@@ -862,10 +862,8 @@ int rv770_uvd_resume(struct radeon_device *rdev)
                chip_id = 0x0100000b;
                break;
        case CHIP_SUMO:
-               chip_id = 0x0100000c;
-               break;
        case CHIP_SUMO2:
-               chip_id = 0x0100000d;
+               chip_id = 0x0100000c;
                break;
        case CHIP_PALM:
                chip_id = 0x0100000e;
@@ -1889,6 +1887,12 @@ static int rv770_startup(struct radeon_device *rdev)
                rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2047,10 +2051,6 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r = radeon_irq_kms_init(rdev);
-       if (r)
-               return r;
-
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -2113,8 +2113,6 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
 {
        u32 link_width_cntl, lanes, speed_cntl, tmp;
        u16 link_cntl2;
-       u32 mask;
-       int ret;
 
        if (radeon_pcie_gen2 == 0)
                return;
@@ -2129,11 +2127,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
        if (ASIC_IS_X2(rdev))
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & DRM_PCIE_SPEED_50))
+       if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+               (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
                return;
 
        DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
index f0b6c2f87c4d70fd752d41918a792a43b6b93cac..a1b0da6b580859dfede1703ee822e8cab28fe91f 100644 (file)
@@ -60,6 +60,11 @@ MODULE_FIRMWARE("radeon/OLAND_me.bin");
 MODULE_FIRMWARE("radeon/OLAND_ce.bin");
 MODULE_FIRMWARE("radeon/OLAND_mc.bin");
 MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
 
 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
 extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -265,6 +270,40 @@ static const u32 oland_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
+static const u32 hainan_golden_registers[] =
+{
+       0x9a10, 0x00010000, 0x00018208,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xd0c0, 0xff000fff, 0x00000100,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd8c0, 0xff000fff, 0x00000100,
+       0xd830, 0x000300c0, 0x00800040,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x4e000000,
+       0x28350, 0x3f3f3fff, 0x00000000,
+       0x30, 0x000000ff, 0x0040,
+       0x34, 0x00000040, 0x00004040,
+       0x9100, 0x03e00000, 0x03600000,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x000003ff, 0x000000f1,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac0c, 0xffffffff, 0x00003210,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+       0x98f8, 0xffffffff, 0x02010001
+};
+
 static const u32 tahiti_mgcg_cgcg_init[] =
 {
        0xc400, 0xffffffff, 0xfffffffc,
@@ -673,6 +712,83 @@ static const u32 oland_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+       0xc400, 0xffffffff, 0xfffffffc,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x92a4, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x9774, 0xffffffff, 0x00000100,
+       0x8984, 0xffffffff, 0x06000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x92a0, 0xffffffff, 0x00000100,
+       0xc380, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x8d88, 0xffffffff, 0x00000100,
+       0x8d8c, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0xad80, 0xffffffff, 0x00000100,
+       0xac54, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0xaf04, 0xffffffff, 0x00000100,
+       0xae04, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9160, 0xffffffff, 0x00010000,
+       0x9164, 0xffffffff, 0x00030002,
+       0x9168, 0xffffffff, 0x00040007,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00090008,
+       0x9174, 0xffffffff, 0x00020001,
+       0x9178, 0xffffffff, 0x00040003,
+       0x917c, 0xffffffff, 0x00000007,
+       0x9180, 0xffffffff, 0x00060005,
+       0x9184, 0xffffffff, 0x00090008,
+       0x9188, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00050004,
+       0x9190, 0xffffffff, 0x00000008,
+       0x9194, 0xffffffff, 0x00070006,
+       0x9198, 0xffffffff, 0x000a0009,
+       0x919c, 0xffffffff, 0x00040003,
+       0x91a0, 0xffffffff, 0x00060005,
+       0x91a4, 0xffffffff, 0x00000009,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000b000a,
+       0x91b0, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00070006,
+       0x91b8, 0xffffffff, 0x0008000b,
+       0x91bc, 0xffffffff, 0x000a0009,
+       0x91c0, 0xffffffff, 0x000d000c,
+       0x91c4, 0xffffffff, 0x00060005,
+       0x91c8, 0xffffffff, 0x00080007,
+       0x91cc, 0xffffffff, 0x0000000b,
+       0x91d0, 0xffffffff, 0x000a0009,
+       0x91d4, 0xffffffff, 0x000d000c,
+       0x9150, 0xffffffff, 0x96940200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc478, 0xffffffff, 0x00000080,
+       0xc404, 0xffffffff, 0x0020003f,
+       0x30, 0xffffffff, 0x0000001c,
+       0x34, 0x000f0000, 0x000f0000,
+       0x160c, 0xffffffff, 0x00000100,
+       0x1024, 0xffffffff, 0x00000100,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x264c, 0x000c0000, 0x000c0000,
+       0x2648, 0x000c0000, 0x000c0000,
+       0x2f50, 0x00000001, 0x00000001,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd0c0, 0xfffffff0, 0x00000100,
+       0xd8c0, 0xfffffff0, 0x00000100
+};
+
 static u32 verde_pg_init[] =
 {
        0x353c, 0xffffffff, 0x40000,
@@ -853,6 +969,17 @@ static void si_init_golden_registers(struct radeon_device *rdev)
                                                 oland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
                break;
+       case CHIP_HAINAN:
+               radeon_program_register_sequence(rdev,
+                                                hainan_golden_registers,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                hainan_golden_registers2,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers2));
+               radeon_program_register_sequence(rdev,
+                                                hainan_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+               break;
        default:
                break;
        }
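
A note on the Hainan golden-register tables added above: each table is a flat array of (offset, mask, value) triples handed to radeon_program_register_sequence(). A hedged sketch of how such a triple table is typically walked as a read-modify-write loop (apply_register_table and the MMIO helpers are illustrative, not the radeon implementation):

#include <stdint.h>

extern uint32_t mmio_read32(uint32_t reg);
extern void mmio_write32(uint32_t reg, uint32_t val);

static void apply_register_table(const uint32_t *t, unsigned int n)
{
        unsigned int i;

        for (i = 0; i + 2 < n; i += 3) {
                uint32_t reg = t[i], mask = t[i + 1], val = t[i + 2];
                uint32_t tmp;

                if (mask == 0xffffffffu)
                        tmp = val;      /* full overwrite */
                else
                        tmp = (mmio_read32(reg) & ~mask) | (val & mask);
                mmio_write32(reg, tmp);
        }
}
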
@@ -1062,6 +1189,45 @@ static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
        {0x0000009f, 0x00a17730}
 };
 
+static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+       {0x0000006f, 0x03044000},
+       {0x00000070, 0x0480c018},
+       {0x00000071, 0x00000040},
+       {0x00000072, 0x01000000},
+       {0x00000074, 0x000000ff},
+       {0x00000075, 0x00143400},
+       {0x00000076, 0x08ec0800},
+       {0x00000077, 0x040000cc},
+       {0x00000079, 0x00000000},
+       {0x0000007a, 0x21000409},
+       {0x0000007c, 0x00000000},
+       {0x0000007d, 0xe8000000},
+       {0x0000007e, 0x044408a8},
+       {0x0000007f, 0x00000003},
+       {0x00000080, 0x00000000},
+       {0x00000081, 0x01000000},
+       {0x00000082, 0x02000000},
+       {0x00000083, 0x00000000},
+       {0x00000084, 0xe3f3e4f4},
+       {0x00000085, 0x00052024},
+       {0x00000087, 0x00000000},
+       {0x00000088, 0x66036603},
+       {0x00000089, 0x01000000},
+       {0x0000008b, 0x1c0a0000},
+       {0x0000008c, 0xff010000},
+       {0x0000008e, 0xffffefff},
+       {0x0000008f, 0xfff3efff},
+       {0x00000090, 0xfff3efbf},
+       {0x00000094, 0x00101101},
+       {0x00000095, 0x00000fff},
+       {0x00000096, 0x00116fff},
+       {0x00000097, 0x60010000},
+       {0x00000098, 0x10010000},
+       {0x00000099, 0x00006000},
+       {0x0000009a, 0x00001000},
+       {0x0000009f, 0x00a07730}
+};
+
 /* ucode loading */
 static int si_mc_load_microcode(struct radeon_device *rdev)
 {
@@ -1095,6 +1261,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
                ucode_size = OLAND_MC_UCODE_SIZE;
                regs_size = TAHITI_IO_MC_REGS_SIZE;
                break;
+       case CHIP_HAINAN:
+               io_mc_regs = (u32 *)&hainan_io_mc_regs;
+               ucode_size = OLAND_MC_UCODE_SIZE;
+               regs_size = TAHITI_IO_MC_REGS_SIZE;
+               break;
        }
 
        running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1198,6 +1369,15 @@ static int si_init_microcode(struct radeon_device *rdev)
                rlc_req_size = SI_RLC_UCODE_SIZE * 4;
                mc_req_size = OLAND_MC_UCODE_SIZE * 4;
                break;
+       case CHIP_HAINAN:
+               chip_name = "HAINAN";
+               rlc_chip_name = "HAINAN";
+               pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+               me_req_size = SI_PM4_UCODE_SIZE * 4;
+               ce_req_size = SI_CE_UCODE_SIZE * 4;
+               rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+               mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+               break;
        default: BUG();
        }
 
@@ -2003,7 +2183,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                        WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if ((rdev->family == CHIP_VERDE) ||
-                  (rdev->family == CHIP_OLAND)) {
+                  (rdev->family == CHIP_OLAND) ||
+                  (rdev->family == CHIP_HAINAN)) {
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
                        case 0:  /* non-AA compressed depth or any compressed stencil */
@@ -2435,7 +2616,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        default:
                rdev->config.si.max_shader_engines = 1;
                rdev->config.si.max_tile_pipes = 4;
-               rdev->config.si.max_cu_per_sh = 2;
+               rdev->config.si.max_cu_per_sh = 5;
                rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 4;
@@ -2466,6 +2647,23 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
                gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
                break;
+       case CHIP_HAINAN:
+               rdev->config.si.max_shader_engines = 1;
+               rdev->config.si.max_tile_pipes = 4;
+               rdev->config.si.max_cu_per_sh = 5;
+               rdev->config.si.max_sh_per_se = 1;
+               rdev->config.si.max_backends_per_se = 1;
+               rdev->config.si.max_texture_channel_caches = 2;
+               rdev->config.si.max_gprs = 256;
+               rdev->config.si.max_gs_threads = 16;
+               rdev->config.si.max_hw_contexts = 8;
+
+               rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+               rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+               rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+               break;
        }
 
        /* Initialize HDP */
@@ -2559,9 +2757,11 @@ static void si_gpu_init(struct radeon_device *rdev)
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
-       WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
-       WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
-       WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+       if (rdev->has_uvd) {
+               WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+       }
 
        si_tiling_mode_table_init(rdev);
 
@@ -3304,8 +3504,9 @@ static void si_mc_program(struct radeon_device *rdev)
        if (radeon_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
-       /* Lockout access through VGA aperture*/
-       WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+       if (!ASIC_IS_NODCE(rdev))
+               /* Lockout access through VGA aperture*/
+               WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
        /* Update configuration */
        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
               rdev->mc.vram_start >> 12);
@@ -3327,9 +3528,11 @@ static void si_mc_program(struct radeon_device *rdev)
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
        evergreen_mc_resume(rdev, &save);
-       /* we need to own VRAM, so turn off the VGA renderer here
-        * to stop it overwriting our objects */
-       rv515_vga_render_disable(rdev);
+       if (!ASIC_IS_NODCE(rdev)) {
+               /* we need to own VRAM, so turn off the VGA renderer here
+                * to stop it overwriting our objects */
+               rv515_vga_render_disable(rdev);
+       }
 }
 
 static void si_vram_gtt_location(struct radeon_device *rdev,
@@ -3397,8 +3600,8 @@ static int si_mc_init(struct radeon_device *rdev)
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* size in MB on si */
-       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        si_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
@@ -4251,8 +4454,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
        WREG32(GRBM_INT_CNTL, 0);
-       WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       if (rdev->num_crtc >= 2) {
+               WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+               WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -4262,8 +4467,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -4273,21 +4480,22 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
-       WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
-
-       tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD1_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD2_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD3_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD4_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD5_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD6_INT_CONTROL, tmp);
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 
+               tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD5_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD6_INT_CONTROL, tmp);
+       }
 }
 
 static int si_irq_init(struct radeon_device *rdev)
@@ -4366,7 +4574,7 @@ int si_irq_set(struct radeon_device *rdev)
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
        u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
-       u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+       u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
        u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 dma_cntl, dma_cntl1;
@@ -4383,12 +4591,14 @@ int si_irq_set(struct radeon_device *rdev)
                return 0;
        }
 
-       hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       if (!ASIC_IS_NODCE(rdev)) {
+               hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       }
 
        dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
        dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -4479,8 +4689,10 @@ int si_irq_set(struct radeon_device *rdev)
 
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
-       WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
-       WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+       if (rdev->num_crtc >= 2) {
+               WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+               WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
                WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
@@ -4490,8 +4702,10 @@ int si_irq_set(struct radeon_device *rdev)
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
@@ -4501,12 +4715,14 @@ int si_irq_set(struct radeon_device *rdev)
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
        }
 
-       WREG32(DC_HPD1_INT_CONTROL, hpd1);
-       WREG32(DC_HPD2_INT_CONTROL, hpd2);
-       WREG32(DC_HPD3_INT_CONTROL, hpd3);
-       WREG32(DC_HPD4_INT_CONTROL, hpd4);
-       WREG32(DC_HPD5_INT_CONTROL, hpd5);
-       WREG32(DC_HPD6_INT_CONTROL, hpd6);
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(DC_HPD1_INT_CONTROL, hpd1);
+               WREG32(DC_HPD2_INT_CONTROL, hpd2);
+               WREG32(DC_HPD3_INT_CONTROL, hpd3);
+               WREG32(DC_HPD4_INT_CONTROL, hpd4);
+               WREG32(DC_HPD5_INT_CONTROL, hpd5);
+               WREG32(DC_HPD6_INT_CONTROL, hpd6);
+       }
 
        return 0;
 }
@@ -4515,6 +4731,9 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 {
        u32 tmp;
 
+       if (ASIC_IS_NODCE(rdev))
+               return;
+
        rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
        rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
        rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
@@ -5118,17 +5337,25 @@ static int si_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = rv770_uvd_resume(rdev);
-       if (!r) {
-               r = radeon_fence_driver_start_ring(rdev,
-                                                  R600_RING_TYPE_UVD_INDEX);
+       if (rdev->has_uvd) {
+               r = rv770_uvd_resume(rdev);
+               if (!r) {
+                       r = radeon_fence_driver_start_ring(rdev,
+                                                          R600_RING_TYPE_UVD_INDEX);
+                       if (r)
+                               dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+               }
                if (r)
-                       dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+                       rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
        }
-       if (r)
-               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
        /* Enable IRQ */
+       if (!rdev->irq.installed) {
+               r = radeon_irq_kms_init(rdev);
+               if (r)
+                       return r;
+       }
+
        r = si_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -5185,16 +5412,18 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       if (ring->ring_size) {
-               r = radeon_ring_init(rdev, ring, ring->ring_size,
-                                    R600_WB_UVD_RPTR_OFFSET,
-                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
-                                    0, 0xfffff, RADEON_CP_PACKET2);
-               if (!r)
-                       r = r600_uvd_init(rdev);
-               if (r)
-                       DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+       if (rdev->has_uvd) {
+               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+               if (ring->ring_size) {
+                       r = radeon_ring_init(rdev, ring, ring->ring_size,
+                                            R600_WB_UVD_RPTR_OFFSET,
+                                            UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+                                            0, 0xfffff, RADEON_CP_PACKET2);
+                       if (!r)
+                               r = r600_uvd_init(rdev);
+                       if (r)
+                               DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+               }
        }
 
        r = radeon_ib_pool_init(rdev);
@@ -5243,8 +5472,10 @@ int si_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        si_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
-       r600_uvd_rbc_stop(rdev);
-       radeon_uvd_suspend(rdev);
+       if (rdev->has_uvd) {
+               r600_uvd_rbc_stop(rdev);
+               radeon_uvd_suspend(rdev);
+       }
        si_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        si_pcie_gart_disable(rdev);
@@ -5308,10 +5539,6 @@ int si_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r = radeon_irq_kms_init(rdev);
-       if (r)
-               return r;
-
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
@@ -5332,11 +5559,13 @@ int si_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 64 * 1024);
 
-       r = radeon_uvd_init(rdev);
-       if (!r) {
-               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-               ring->ring_obj = NULL;
-               r600_ring_init(rdev, ring, 4096);
+       if (rdev->has_uvd) {
+               r = radeon_uvd_init(rdev);
+               if (!r) {
+                       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+                       ring->ring_obj = NULL;
+                       r600_ring_init(rdev, ring, 4096);
+               }
        }
 
        rdev->ih.ring_obj = NULL;
@@ -5384,7 +5613,8 @@ void si_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       radeon_uvd_fini(rdev);
+       if (rdev->has_uvd)
+               radeon_uvd_fini(rdev);
        si_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
index 222877ba6cf5b3f9e8deb0738b15d5662aca0cf0..8f2d7d4f9b282e05f7d1e4f2e0b9d2e5325967c4 100644 (file)
@@ -28,6 +28,7 @@
 
 #define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
 #define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
 
 /* discrete uvd clocks */
 #define        CG_UPLL_FUNC_CNTL                               0x634
index 7dff49ed66e763c4f0214a4a52ef0864f798b99f..99e2034e49ccf0687e4096e5912b025e6dc3df4f 100644 (file)
@@ -451,27 +451,16 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 {
        struct drm_pending_vblank_event *event;
        struct drm_device *dev = scrtc->crtc.dev;
-       struct timeval vblanktime;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
        event = scrtc->event;
        scrtc->event = NULL;
+       if (event) {
+               drm_send_vblank_event(dev, 0, event);
+               drm_vblank_put(dev, 0);
+       }
        spin_unlock_irqrestore(&dev->event_lock, flags);
-
-       if (event == NULL)
-               return;
-
-       event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
-       event->event.tv_sec = vblanktime.tv_sec;
-       event->event.tv_usec = vblanktime.tv_usec;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       list_add_tail(&event->base.link, &event->base.file_priv->event_list);
-       wake_up_interruptible(&event->base.file_priv->event_wait);
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
-       drm_vblank_put(dev, 0);
 }
 
 static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
index e461e99724552f96014581f81f191df8baed89ef..7a4d10106906e92e91e75763e0d96b07557bfbff 100644 (file)
@@ -6,6 +6,7 @@ config DRM_TILCDC
        select DRM_GEM_CMA_HELPER
        select VIDEOMODE_HELPERS
        select BACKLIGHT_CLASS_DEVICE
+       select BACKLIGHT_LCD_SUPPORT
        help
          Choose this option if you have an TI SoC with LCDC display
          controller, for example AM33xx in beagle-bone, DA8xx, or
index 1e2060324f02fd77cbbb3c4d5e2081816cbd0a53..8c04943f82e3531d75ee5565eab06c16e6f46b16 100644 (file)
@@ -1128,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return err;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_err(&pdev->dev, "failed to get registers\n");
-               return -ENXIO;
-       }
-
        dc->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(dc->regs))
                return PTR_ERR(dc->regs);
index dc3ae5c56f563bf7735ba4c6ee4bffa0f24ccdd0..d39a5cede0b0a59f15f5e21d85d1913086624da6 100644 (file)
@@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
 static void mt_free_input_name(struct hid_input *hi)
 {
        struct hid_device *hdev = hi->report->device;
+       const char *name = hi->input->name;
 
-       if (hi->input->name != hdev->name)
-               kfree(hi->input->name);
+       if (name != hdev->name) {
+               hi->input->name = hdev->name;
+               kfree(name);
+       }
 }
 
 static ssize_t mt_show_quirks(struct device *dev,
@@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
        struct hid_input *hi;
 
        sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
-       hid_hw_stop(hdev);
-
        list_for_each_entry(hi, &hdev->inputs, list)
                mt_free_input_name(hi);
 
+       hid_hw_stop(hdev);
+
        kfree(td);
        hid_set_drvdata(hdev, NULL);
 }
index bad8128b283a8752a11e1c3a13d02c83bb1df07b..21ef68934a20bba366c1ea1c5be9705f0fafe6fa 100644 (file)
@@ -329,7 +329,7 @@ static u32 get_vp_index(uuid_le *type_guid)
                return 0;
        }
        cur_cpu = (++next_vp % max_cpus);
-       return cur_cpu;
+       return hv_context.vp_index[cur_cpu];
 }
 
 /*
index df0b69987914d0c2241d746956db3d9d4e02a351..2ebd6ce46108ec4e44797cb695194672ad81bdac 100644 (file)
@@ -1414,14 +1414,18 @@ static int abituguru_probe(struct platform_device *pdev)
        pr_info("found Abit uGuru\n");
 
        /* Register sysfs hooks */
-       for (i = 0; i < sysfs_attr_i; i++)
-               if (device_create_file(&pdev->dev,
-                               &data->sysfs_attr[i].dev_attr))
+       for (i = 0; i < sysfs_attr_i; i++) {
+               res = device_create_file(&pdev->dev,
+                                        &data->sysfs_attr[i].dev_attr);
+               if (res)
                        goto abituguru_probe_error;
-       for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
-               if (device_create_file(&pdev->dev,
-                               &abituguru_sysfs_attr[i].dev_attr))
+       }
+       for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
+               res = device_create_file(&pdev->dev,
+                                        &abituguru_sysfs_attr[i].dev_attr);
+               if (res)
                        goto abituguru_probe_error;
+       }
 
        data->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (!IS_ERR(data->hwmon_dev))
index 7e76922a4ba9b0f9a56191b2afb1fba5814ccdf0..f920619cd6da5405ca5cd9c64e9f1ad31fb8a526 100644 (file)
@@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
        man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
        dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
 
+       if (man_id < 0 || dev_id < 0)
+               return -ENODEV;
+
        if (man_id == 0x4d && dev_id == 0x01)
                type_name = "max1617a";
        else if (man_id == 0x41) {
                if ((dev_id & 0xF0) == 0x30)
                        type_name = "adm1023";
-               else
+               else if ((dev_id & 0xF0) == 0x00)
                        type_name = "adm1021";
+               else
+                       return -ENODEV;
        } else if (man_id == 0x49)
                type_name = "thmc10";
        else if (man_id == 0x23)
                type_name = "gl523sm";
        else if (man_id == 0x54)
                type_name = "mc1066";
-       /* LM84 Mfr ID in a different place, and it has more unused bits */
-       else if (conv_rate == 0x00
-                && (config & 0x7F) == 0x00
-                && (status & 0xAB) == 0x00)
-               type_name = "lm84";
-       else
-               type_name = "max1617";
+       else {
+               int lte, rte, lhi, rhi, llo, rlo;
+
+               /* extra checks for LM84 and MAX1617 to avoid misdetections */
+
+               llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
+               rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
+
+               /* fail if any of the additional register reads failed */
+               if (llo < 0 || rlo < 0)
+                       return -ENODEV;
+
+               lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
+               rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
+               lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
+               rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
+
+               /*
+                * Fail for negative temperatures and negative high limits.
+                * This check also catches read errors on the tested registers.
+                */
+               if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
+                       return -ENODEV;
+
+               /* fail if all registers hold the same value */
+               if (lte == rte && lte == lhi && lte == rhi && lte == llo
+                   && lte == rlo)
+                       return -ENODEV;
+
+               /*
+                * LM84 Mfr ID is in a different place,
+                * and it has more unused bits.
+                */
+               if (conv_rate == 0x00
+                   && (config & 0x7F) == 0x00
+                   && (status & 0xAB) == 0x00) {
+                       type_name = "lm84";
+               } else {
+                       /* fail if low limits are larger than high limits */
+                       if ((s8)llo > lhi || (s8)rlo > rhi)
+                               return -ENODEV;
+                       type_name = "max1617";
+               }
+       }
 
        pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
                 type_name, i2c_adapter_id(adapter), client->addr);
index aafa4531b9614e4b068c9b91698927272c7dc588..52b77afebde120a17ce75bf6401103cad556d021 100644 (file)
@@ -84,8 +84,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
                return PTR_ERR(channels);
 
        st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
-       if (st == NULL)
-               return -ENOMEM;
+       if (st == NULL) {
+               ret = -ENOMEM;
+               goto error_release_channels;
+       }
 
        st->channels = channels;
 
@@ -159,7 +161,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
 error_remove_group:
        sysfs_remove_group(&dev->kobj, &st->attr_group);
 error_release_channels:
-       iio_channel_release_all(st->channels);
+       iio_channel_release_all(channels);
        return ret;
 }
 
index f43f5e571db97a9b178f76a88ef82e8414c9ff39..04638aee90398f44e73a620e3a3cffc9631b8d40 100644 (file)
@@ -3705,8 +3705,10 @@ static int nct6775_probe(struct platform_device *pdev)
                        data->have_temp |= 1 << i;
                        data->have_temp_fixed |= 1 << i;
                        data->reg_temp[0][i] = reg_temp_alternate[i];
-                       data->reg_temp[1][i] = reg_temp_over[i];
-                       data->reg_temp[2][i] = reg_temp_hyst[i];
+                       if (i < num_reg_temp) {
+                               data->reg_temp[1][i] = reg_temp_over[i];
+                               data->reg_temp[2][i] = reg_temp_hyst[i];
+                       }
                        data->temp_src[i] = i + 1;
                        continue;
                }
index a478454f690fbc318aefa16b8952040944692149..dfe6d9527efb4b6489676468d495e56e7841b2a9 100644 (file)
@@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev)
        mutex_lock(&data->update_lock);
 
        next_update = data->last_updated +
-                     msecs_to_jiffies(data->update_interval) + 1;
+                     msecs_to_jiffies(data->update_interval);
        if (time_after(jiffies, next_update) || !data->valid) {
                if (data->kind != tmp432) {
                        /*
index 21fbb340ad6603a249d052d8f7accb6aed0f7753..c41ca6354fc59d8d2111dfdbbb47bdab87f725a8 100644 (file)
@@ -383,7 +383,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
-       /* Enable interrupts */
+       /* Clear and enable interrupts */
+       i2c_dw_clear_int(dev);
        dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
 }
 
@@ -448,8 +449,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
                                cmd |= BIT(9);
 
                        if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+
+                               /* avoid rx buffer overrun */
+                               if (rx_limit - dev->rx_outstanding <= 0)
+                                       break;
+
                                dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
                                rx_limit--;
+                               dev->rx_outstanding++;
                        } else
                                dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
                        tx_limit--; buf_len--;
@@ -502,8 +509,10 @@ i2c_dw_read(struct dw_i2c_dev *dev)
 
                rx_valid = dw_readl(dev, DW_IC_RXFLR);
 
-               for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+               for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
                        *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+                       dev->rx_outstanding--;
+               }
 
                if (len > 0) {
                        dev->status |= STATUS_READ_IN_PROGRESS;
@@ -561,6 +570,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        dev->msg_err = 0;
        dev->status = STATUS_IDLE;
        dev->abort_source = 0;
+       dev->rx_outstanding = 0;
 
        ret = i2c_dw_wait_bus_not_busy(dev);
        if (ret < 0)
index 9c1840ee09c7a4fde94a468a5ff339bdb42ae2ac..e761ad18dd61b8888e77a2989b772f79431d8b26 100644 (file)
@@ -60,6 +60,7 @@
  * @adapter: i2c subsystem adapter node
  * @tx_fifo_depth: depth of the hardware tx fifo
  * @rx_fifo_depth: depth of the hardware rx fifo
+ * @rx_outstanding: current master-rx elements in tx fifo
  */
 struct dw_i2c_dev {
        struct device           *dev;
@@ -88,6 +89,7 @@ struct dw_i2c_dev {
        u32                     master_cfg;
        unsigned int            tx_fifo_depth;
        unsigned int            rx_fifo_depth;
+       int                     rx_outstanding;
 };
 
 #define ACCESS_SWAP            0x00000001
index 8ec91335d95a52c679193cabbd4a522cbb3851bc..35b70a1edf572bedda30222ec8c9d49693491314 100644 (file)
@@ -69,6 +69,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
 static const struct acpi_device_id dw_i2c_acpi_match[] = {
        { "INT33C2", 0 },
        { "INT33C3", 0 },
+       { "80860F41", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
index e1cf2e0e1f23de900e92c1839d8c6e856658ea37..3a6903f639137af09d28ab623c345d01812bd0e7 100644 (file)
@@ -231,7 +231,11 @@ static const char *i801_feature_names[] = {
 
 static unsigned int disable_features;
 module_param(disable_features, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
+       "\t\t  0x01  disable SMBus PEC\n"
+       "\t\t  0x02  disable the block buffer\n"
+       "\t\t  0x08  disable the I2C block read functionality\n"
+       "\t\t  0x10  don't use interrupts ");
 
 /* Make sure the SMBus host is ready to start transmitting.
    Return 0 if it is, -EBUSY if it is not. */
index 3bbd65d35a5e05365361a4d7711e41de99fa8a31..1a3abd6a0bfcb7166bc7cfd7a9cb7be15016e48e 100644 (file)
@@ -252,7 +252,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                writel(drv_data->cntl_bits,
                        drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
                drv_data->block = 0;
-               wake_up_interruptible(&drv_data->waitq);
+               wake_up(&drv_data->waitq);
                break;
 
        case MV64XXX_I2C_ACTION_CONTINUE:
@@ -300,7 +300,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
                        drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
                drv_data->block = 0;
-               wake_up_interruptible(&drv_data->waitq);
+               wake_up(&drv_data->waitq);
                break;
 
        case MV64XXX_I2C_ACTION_INVALID:
@@ -315,7 +315,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
                        drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
                drv_data->block = 0;
-               wake_up_interruptible(&drv_data->waitq);
+               wake_up(&drv_data->waitq);
                break;
        }
 }
@@ -381,7 +381,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
        unsigned long   flags;
        char            abort = 0;
 
-       time_left = wait_event_interruptible_timeout(drv_data->waitq,
+       time_left = wait_event_timeout(drv_data->waitq,
                !drv_data->block, drv_data->adapter.timeout);
 
        spin_lock_irqsave(&drv_data->lock, flags);
index 6e8ee92ab55398aec8f5517334166c38e9eb0ba8..cab1c91b75a3a8e300057ef2aaf3e98b6af999d4 100644 (file)
@@ -1082,11 +1082,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        /* map the registers */
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "cannot find IO resource\n");
-               return -ENOENT;
-       }
-
        i2c->regs = devm_ioremap_resource(&pdev->dev, res);
 
        if (IS_ERR(i2c->regs))
index 5a7ad240bd264bd53764f3053acab0e7127a28b7..a63c7d50683676f5e01969573ffce373b3ce578f 100644 (file)
@@ -303,12 +303,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
        adap->class = I2C_CLASS_HWMON;
 
        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (mem_res == NULL) {
-               dev_err(&pdev->dev, "Unable to get MEM resource\n");
-               err = -EINVAL;
-               goto out;
-       }
-
        siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
        if (IS_ERR(siic->base)) {
                err = PTR_ERR(siic->base);
index b60ff90adc39a92ed3940a53076d53cf6f7ae4f6..9aa1b60f7fdd86581e2f694d96a2834e0ecad055 100644 (file)
@@ -714,11 +714,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        int ret = 0;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no mem resource\n");
-               return -EINVAL;
-       }
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
index 6b63cc7eb71e38ff31253c0e6af496b3b3b0bccb..48e31ed69dbf159f5e44f2ab2a4968896320c3eb 100644 (file)
@@ -892,7 +892,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
-static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device);
+static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
+                                  i2c_sysfs_delete_device);
 
 static struct attribute *i2c_adapter_attrs[] = {
        &dev_attr_name.attr,
index 9f3a8ef1fb3e4484bcbc027337ce75a3f6e13425..b3d03d335948a5a5757bd4d49650b1685cbfb78b 100644 (file)
@@ -390,8 +390,8 @@ static int exynos_adc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int exynos_adc_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct exynos_adc *info = platform_get_drvdata(pdev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct exynos_adc *info = iio_priv(indio_dev);
        u32 con;
 
        if (info->version == ADC_V2) {
@@ -413,8 +413,8 @@ static int exynos_adc_suspend(struct device *dev)
 
 static int exynos_adc_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct exynos_adc *info = platform_get_drvdata(pdev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct exynos_adc *info = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_enable(info->vdd);
index 9201022945e966719c4322e9242d0582316a9eee..9d19ba74f22bd92125501e2827ef51bbae59da13 100644 (file)
@@ -64,7 +64,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
        while (chan->indio_dev) {
                if (chan->indio_dev != indio_dev) {
                        ret = -EINVAL;
-                       goto error_release_channels;
+                       goto error_free_scan_mask;
                }
                set_bit(chan->channel->scan_index,
                        cb_buff->buffer.scan_mask);
@@ -73,6 +73,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
 
        return cb_buff;
 
+error_free_scan_mask:
+       kfree(cb_buff->buffer.scan_mask);
 error_release_channels:
        iio_channel_release_all(cb_buff->channels);
 error_free_cb_buff:
@@ -100,6 +102,7 @@ EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
 
 void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
 {
+       kfree(cb_buff->buffer.scan_mask);
        iio_channel_release_all(cb_buff->channels);
        kfree(cb_buff);
 }
index bd33473f8e38afd4deeeb243f12c2bf981587740..ed9bc8ae933030158c9d23b41acfc2bd2de7d61e 100644 (file)
@@ -312,6 +312,8 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
                        goto read_error;
 
                *val = *val >> ch->scan_type.shift;
+
+               err = st_sensors_set_enable(indio_dev, false);
        }
        mutex_unlock(&indio_dev->mlock);
 
index f4a6f0838327daaffe032bc4b3403d4c59bc83a7..b61160bd935eafffd7e04207a0e0fb043f87450f 100644 (file)
@@ -5,7 +5,7 @@ menu "Digital to analog converters"
 
 config AD5064
        tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
-       depends on (SPI_MASTER || I2C)
+       depends on (SPI_MASTER && I2C!=m) || I2C
        help
          Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
          AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
@@ -27,7 +27,7 @@ config AD5360
 
 config AD5380
        tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
-       depends on (SPI_MASTER || I2C)
+       depends on (SPI_MASTER && I2C!=m) || I2C
        select REGMAP_I2C if I2C
        select REGMAP_SPI if SPI_MASTER
        help
@@ -57,7 +57,7 @@ config AD5624R_SPI
 
 config AD5446
        tristate "Analog Devices AD5446 and similar single channel DACs driver"
-       depends on (SPI_MASTER || I2C)
+       depends on (SPI_MASTER && I2C!=m) || I2C
        help
          Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
          AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
index a884252ac66b477db3a0fce966b40fb76ae456b2..e76d4ace53ff76d73917933a191da08b907cdc2f 100644 (file)
@@ -212,7 +212,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
                (pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
                ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
                ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
-               ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9)));
+               ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
 
        st->regs[ADF4350_REG3] = pdata->r3_user_settings &
                                 (ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
index 795d100b4c36fea7c82785e4f4b8545a13ff7eab..98ddc323add011fe84e45abc4f8902ce9e7f09af 100644 (file)
@@ -124,7 +124,7 @@ static int __of_iio_channel_get(struct iio_channel *channel,
        channel->indio_dev = indio_dev;
        index = iiospec.args_count ? iiospec.args[0] : 0;
        if (index >= indio_dev->num_channels) {
-               return -EINVAL;
+               err = -EINVAL;
                goto err_put;
        }
        channel->channel = &indio_dev->channels[index];
@@ -450,7 +450,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
        s64 raw64 = raw;
        int ret;
 
-       ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE);
+       ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
        if (ret == 0)
                raw64 += offset;
 
index 71c2c71168028dbe6c8cfe6b3de314cc344d84cd..34fbc2f60a09debc72652b4e8ab8dcef0d09dbe7 100644 (file)
@@ -3269,9 +3269,9 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
 }
 
 static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
-                              void *ctx)
+                              void *ptr)
 {
-       struct net_device *ndev = (struct net_device *)ctx;
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct cma_device *cma_dev;
        struct rdma_id_private *id_priv;
        int ret = NOTIFY_DONE;
index 23d734349d8e45e31bb81776f59757687e1e135c..a188d31785590e9cf59c7f3bf77926a11817e4b7 100644 (file)
@@ -1161,7 +1161,7 @@ static void netdev_removed(struct mlx4_ib_dev *dev, int port)
 static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
                                void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mlx4_ib_dev *ibdev;
        struct net_device *oldnd;
        struct mlx4_ib_iboe *iboe;
index 81c7b73695d26c4898735b7e58abcd49bca398d6..3b9afccaaade824370f5c0ea0d6d6ceb519e6090 100644 (file)
@@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
        if (dma_region) {
                struct qib_mregion *tmr;
 
-               tmr = rcu_dereference(dev->dma_mr);
+               tmr = rcu_access_pointer(dev->dma_mr);
                if (!tmr) {
                        qib_get_mr(mr);
                        rcu_assign_pointer(dev->dma_mr, mr);
index f19b0998a53cfbdffd644a627681864dfca049da..2e84ef859c5b9755d0940ee6bb812915dfcd69f0 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2004 Alex Aizman
  * Copyright (C) 2005 Mike Christie
  * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
  * maintained by openib-general@openib.org
  *
  * This software is available to you under a choice of one of two
index 06f578cde75b002bbc935e45516d3bf1266c2ca3..4f069c0d4c04371be4a156cfac59f24e23fbac02 100644 (file)
@@ -8,6 +8,7 @@
  *
  * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index a00ccd1ca33337681afa9624b652c1912574ef07..b6d81a86c9760eed77c3724d3bb138d0e8cf8a6e 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 68ebb7fe072a0f347fa94e2e6572024deae4ebeb..7827baf455a1f45fa4fa20495fcf01dd370914ff 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 5278916c31038cdea28ad9a524d2aa18d653e21d..2c4941d0656b2e389cb0d38f978682ce73388311 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -292,10 +293,10 @@ out_err:
 }
 
 /**
- * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * releases the FMR pool and QP objects, returns 0 on success,
  * -1 on failure
  */
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 {
        int cq_index;
        BUG_ON(ib_conn == NULL);
@@ -314,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 
                rdma_destroy_qp(ib_conn->cma_id);
        }
-       /* if cma handler context, the caller acts s.t the cma destroy the id */
-       if (ib_conn->cma_id != NULL && can_destroy_id)
-               rdma_destroy_id(ib_conn->cma_id);
 
        ib_conn->fmr_pool = NULL;
        ib_conn->qp       = NULL;
-       ib_conn->cma_id   = NULL;
        kfree(ib_conn->page_vec);
 
        if (ib_conn->login_buf) {
@@ -415,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
        list_del(&ib_conn->conn_list);
        mutex_unlock(&ig.connlist_mutex);
        iser_free_rx_descriptors(ib_conn);
-       iser_free_ib_conn_res(ib_conn, can_destroy_id);
+       iser_free_ib_conn_res(ib_conn);
        ib_conn->device = NULL;
        /* on EVENT_ADDR_ERROR there's no device yet for this conn */
        if (device != NULL)
                iser_device_try_release(device);
+       /* if cma handler context, the caller actually destroy the id */
+       if (ib_conn->cma_id != NULL && can_destroy_id) {
+               rdma_destroy_id(ib_conn->cma_id);
+               ib_conn->cma_id = NULL;
+       }
        iscsi_destroy_endpoint(ib_conn->ep);
 }
 
index b08ca7a9f76bf8ea70f63bfd6c291d4d71b5814d..3f3f0416fbdd52cb6664d56e54a14f8df344b589 100644 (file)
@@ -2226,6 +2226,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch)
        spin_unlock_irq(&sdev->spinlock);
 }
 
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+       struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       if (ch->in_shutdown) {
+               spin_unlock_irqrestore(&ch->spinlock, flags);
+               return true;
+       }
+
+       ch->in_shutdown = true;
+       target_sess_cmd_list_set_waiting(se_sess);
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+
+       return true;
+}
+
 /**
  * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
  * @cm_id: Pointer to the CM ID of the channel to be drained.
@@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
        spin_unlock_irq(&sdev->spinlock);
 
        if (do_reset) {
+               if (ch->sess)
+                       srpt_shutdown_session(ch->sess);
+
                ret = srpt_ch_qp_err(ch);
                if (ret < 0)
                        printk(KERN_ERR "Setting queue pair in error state"
@@ -2328,7 +2352,7 @@ static void srpt_release_channel_work(struct work_struct *w)
        se_sess = ch->sess;
        BUG_ON(!se_sess);
 
-       target_wait_for_sess_cmds(se_sess, 0);
+       target_wait_for_sess_cmds(se_sess);
 
        transport_deregister_session_configfs(se_sess);
        transport_deregister_session(se_sess);
@@ -3466,14 +3490,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
        spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
-       return true;
-}
-
 /**
  * srpt_close_session() - Forcibly close a session.
  *
index 4caf55cda7b170ba2928dc065641a521f66ebc47..3dae156905de53c1f9e9c5f0717f4ef4c3729803 100644 (file)
@@ -325,6 +325,7 @@ struct srpt_rdma_ch {
        u8                      sess_name[36];
        struct work_struct      release_work;
        struct completion       *release_done;
+       bool                    in_shutdown;
 };
 
 /**
index 2f78538e09d0f9e733be6f862a4de8e9d2c0301c..b2420ae19e148039147a33081a651d718e8d2e4d 100644 (file)
@@ -1379,6 +1379,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        struct synaptics_data old_priv = *priv;
+       unsigned char param[2];
        int retry = 0;
        int error;
 
@@ -1394,6 +1395,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
                         */
                        ssleep(1);
                }
+               ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETID);
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
 
index 0bfd8cf252002d4095dc699cf8277931b1ec00a0..518282da6d850b180d56572c0517b62424f12e19 100644 (file)
@@ -342,10 +342,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
                        ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
 
-               switch (wacom->id[idx] & 0xfffff) {
+               switch (wacom->id[idx]) {
                case 0x812: /* Inking pen */
                case 0x801: /* Intuos3 Inking pen */
-               case 0x20802: /* Intuos4 Inking Pen */
+               case 0x120802: /* Intuos4/5 Inking Pen */
                case 0x012:
                        wacom->tool[idx] = BTN_TOOL_PENCIL;
                        break;
@@ -356,11 +356,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x823: /* Intuos3 Grip Pen */
                case 0x813: /* Intuos3 Classic Pen */
                case 0x885: /* Intuos3 Marker Pen */
-               case 0x802: /* Intuos4 General Pen */
-               case 0x804: /* Intuos4 Marker Pen */
-               case 0x40802: /* Intuos4 Classic Pen */
-               case 0x18802: /* DTH2242 Grip Pen */
+               case 0x802: /* Intuos4/5 13HD/24HD General Pen */
+               case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
                case 0x022:
+               case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
+               case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
+               case 0x160802: /* Cintiq 13HD Pro Pen */
+               case 0x180802: /* DTH2242 Pen */
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
 
@@ -391,10 +393,14 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x82b: /* Intuos3 Grip Pen Eraser */
                case 0x81b: /* Intuos3 Classic Pen Eraser */
                case 0x91b: /* Intuos3 Airbrush Eraser */
-               case 0x80c: /* Intuos4 Marker Pen Eraser */
-               case 0x80a: /* Intuos4 General Pen Eraser */
-               case 0x4080a: /* Intuos4 Classic Pen Eraser */
-               case 0x90a: /* Intuos4 Airbrush Eraser */
+               case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
+               case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+               case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+               case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+               case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+               case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+               case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
+               case 0x18080a: /* DTH2242 Eraser */
                        wacom->tool[idx] = BTN_TOOL_RUBBER;
                        break;
 
@@ -402,7 +408,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x912:
                case 0x112:
                case 0x913: /* Intuos3 Airbrush */
-               case 0x902: /* Intuos4 Airbrush */
+               case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
+               case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
                        wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
                        break;
 
@@ -533,10 +540,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                                input_report_key(input, BTN_8, (data[3] & 0x80));
                        }
                        if (data[1] | (data[2] & 0x01) | data[3]) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                } else if (features->type == DTK) {
@@ -546,6 +551,26 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        input_report_key(input, BTN_3, (data[6] & 0x08));
                        input_report_key(input, BTN_4, (data[6] & 0x10));
                        input_report_key(input, BTN_5, (data[6] & 0x20));
+                       if (data[6] & 0x3f) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
+               } else if (features->type == WACOM_13HD) {
+                       input_report_key(input, BTN_0, (data[3] & 0x01));
+                       input_report_key(input, BTN_1, (data[4] & 0x01));
+                       input_report_key(input, BTN_2, (data[4] & 0x02));
+                       input_report_key(input, BTN_3, (data[4] & 0x04));
+                       input_report_key(input, BTN_4, (data[4] & 0x08));
+                       input_report_key(input, BTN_5, (data[4] & 0x10));
+                       input_report_key(input, BTN_6, (data[4] & 0x20));
+                       input_report_key(input, BTN_7, (data[4] & 0x40));
+                       input_report_key(input, BTN_8, (data[4] & 0x80));
+                       if ((data[3] & 0x01) | data[4]) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
                } else if (features->type == WACOM_24HD) {
                        input_report_key(input, BTN_0, (data[6] & 0x01));
                        input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -590,10 +615,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        }
 
                        if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
@@ -618,10 +641,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        }
 
                        if (data[2] | (data[3] & 0x01) | data[4] | data[5]) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                } else {
@@ -668,10 +689,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
                                data[2] | (data[3] & 0x1f) | data[4] | data[8] |
                                (data[7] & 0x01)) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                }
@@ -1301,6 +1320,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
        case INTUOS4L:
        case CINTIQ:
        case WACOM_BEE:
+       case WACOM_13HD:
        case WACOM_21UX2:
        case WACOM_22HD:
        case WACOM_24HD:
@@ -1530,15 +1550,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(KEY_PROG1, input_dev->keybit);
                __set_bit(KEY_PROG2, input_dev->keybit);
                __set_bit(KEY_PROG3, input_dev->keybit);
+
+               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+               input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
                /* fall through */
 
        case DTK:
                for (i = 0; i < 6; i++)
                        __set_bit(BTN_0 + i, input_dev->keybit);
 
-               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-               input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
-
                __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
 
                wacom_setup_cintiq(wacom_wac);
@@ -1579,6 +1599,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                wacom_setup_cintiq(wacom_wac);
                break;
 
+       case WACOM_13HD:
+               for (i = 0; i < 9; i++)
+                       __set_bit(BTN_0 + i, input_dev->keybit);
+
+               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+               wacom_setup_cintiq(wacom_wac);
+               break;
+
        case INTUOS3:
        case INTUOS3L:
                __set_bit(BTN_4, input_dev->keybit);
@@ -1937,7 +1966,8 @@ static const struct wacom_features wacom_features_0xF4 =
          63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xF8 =
        { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS,   104480, 65600, 2047, /* Pen */
-         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
+         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
 static const struct wacom_features wacom_features_0xF6 =
        { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
          .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
@@ -1950,6 +1980,9 @@ static const struct wacom_features wacom_features_0xC5 =
 static const struct wacom_features wacom_features_0xC6 =
        { "Wacom Cintiq 12WX",    WACOM_PKGLEN_INTUOS,    53020, 33440, 1023,
          63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x304 =
+       { "Wacom Cintiq 13HD",    WACOM_PKGLEN_INTUOS,    59552, 33848, 1023,
+         63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xC7 =
        { "Wacom DTU1931",        WACOM_PKGLEN_GRAPHIRE,  37832, 30305,  511,
          0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1959,6 +1992,9 @@ static const struct wacom_features wacom_features_0xCE =
 static const struct wacom_features wacom_features_0xF0 =
        { "Wacom DTU1631",        WACOM_PKGLEN_GRAPHIRE,  34623, 19553,  511,
          0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x57 =
+       { "Wacom DTK2241",        WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
+         63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
 static const struct wacom_features wacom_features_0x59 = /* Pen */
        { "Wacom DTH2242",        WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
          63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
@@ -1972,6 +2008,13 @@ static const struct wacom_features wacom_features_0xCC =
 static const struct wacom_features wacom_features_0xFA =
        { "Wacom Cintiq 22HD",    WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
          63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x5B =
+       { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS,      95840, 54260, 2047,
+         63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
+static const struct wacom_features wacom_features_0x5E =
+       { "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 };
 static const struct wacom_features wacom_features_0x90 =
        { "Wacom ISDv4 90",       WACOM_PKGLEN_GRAPHIRE,  26202, 16325,  255,
          0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2001,7 +2044,7 @@ static const struct wacom_features wacom_features_0xE5 =
 static const struct wacom_features wacom_features_0xE6 =
        { "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
          0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
-       .touch_max = 2 };
+         .touch_max = 2 };
 static const struct wacom_features wacom_features_0xEC =
        { "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
          0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2143,8 +2186,11 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x43) },
        { USB_DEVICE_WACOM(0x44) },
        { USB_DEVICE_WACOM(0x45) },
+       { USB_DEVICE_WACOM(0x57) },
        { USB_DEVICE_WACOM(0x59) },
        { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
+       { USB_DEVICE_WACOM(0x5B) },
+       { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xB0) },
        { USB_DEVICE_WACOM(0xB1) },
        { USB_DEVICE_WACOM(0xB2) },
@@ -2205,6 +2251,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x100) },
        { USB_DEVICE_WACOM(0x101) },
        { USB_DEVICE_WACOM(0x10D) },
+       { USB_DEVICE_WACOM(0x304) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
index 5f9a7721e16cf36a1a92021a03ddb45648292b88..dfc9e08e7f70458c0a41378ae7034a027e52d0ea 100644 (file)
@@ -82,6 +82,7 @@ enum {
        WACOM_24HD,
        CINTIQ,
        WACOM_BEE,
+       WACOM_13HD,
        WACOM_MO,
        WIRELESS,
        BAMBOO_PT,
index 17c9097f3b5ddddd40fad74345b1d2376c4ada7a..39f3df8670c311ec2373ec4ddad585ce4217743b 100644 (file)
@@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client,
        input_set_abs_params(input_dev,
                             ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
        input_set_abs_params(input_dev,
-                            ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0);
+                            ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
        input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0);
 
        input_set_drvdata(input_dev, ts);
index 29889bbdcc6d54c4a975de0905d65c5b285595db..63b3d4eb0ef768b4cded4a2bfbad57a0b8563b81 100644 (file)
@@ -76,16 +76,10 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
 {
        u32 irqnr;
 
-       do {
-               irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
-               if (irqnr != 0x7f) {
-                       __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
-                       irqnr = irq_find_mapping(icoll_domain, irqnr);
-                       handle_IRQ(irqnr, regs);
-                       continue;
-               }
-               break;
-       } while (1);
+       irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
+       __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+       irqnr = irq_find_mapping(icoll_domain, irqnr);
+       handle_IRQ(irqnr, regs);
 }
 
 static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
index 065b7a31a47833a3cbe444f3e8779611ca2ede3a..47a52ab580d863ba2266d26f228befc2695218f1 100644 (file)
@@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
 
        /* Skip invalid IRQs, only register handlers for the real ones */
        if (!(f->valid & BIT(hwirq)))
-               return -ENOTSUPP;
+               return -EPERM;
        irq_set_chip_data(irq, f);
        irq_set_chip_and_handler(irq, &f->chip,
                                handle_level_irq);
index 884d11c7355fb5d5cc0636c25b06e1ba1b856ef5..2bbb00404cf5001df2c6f8654de6d467d58f3b6d 100644 (file)
@@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
 
        /* Skip invalid IRQs, only register handlers for the real ones */
        if (!(v->valid_sources & (1 << hwirq)))
-               return -ENOTSUPP;
+               return -EPERM;
        irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
        irq_set_chip_data(irq, v->base);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
index 9b1b274c7d256d96667c42150e19f7cf547d3f1f..c123709acf823829a274db2ca6ced2060bbae341 100644 (file)
@@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr)
 
 static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
 {
-       if (contr - 1 >= CAPI_MAXCONTR)
+       if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
                return NULL;
 
        return capi_controller[contr - 1];
@@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
 {
        lockdep_assert_held(&capi_controller_lock);
 
-       if (applid - 1 >= CAPI_MAXAPPL)
+       if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
                return NULL;
 
        return capi_applications[applid - 1];
@@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
 
 static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
 {
-       if (applid - 1 >= CAPI_MAXAPPL)
+       if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
                return NULL;
 
        return rcu_dereference(capi_applications[applid - 1]);
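The added "contr < 1" / "applid < 1" guards matter because the u16 argument is promoted to int before the subtraction: for an id of 0, contr - 1 evaluates to -1 rather than wrapping to a large value, so the old ">= CAPI_MAXCONTR" test let an out-of-bounds index through. A minimal userspace sketch of that promotion pitfall (MAXCONTR is a stand-in constant, not the real CAPI_MAXCONTR):

#include <stdint.h>
#include <stdio.h>

#define MAXCONTR 32	/* stand-in for CAPI_MAXCONTR */

int main(void)
{
        uint16_t contr = 0;

        /* Integer promotion: contr - 1 is the int -1, not 65535,
         * so the old guard does not reject an id of 0. */
        printf("old check rejects 0: %d\n", (contr - 1) >= MAXCONTR);

        /* The explicit lower bound closes the hole. */
        printf("new check rejects 0: %d\n",
               contr < 1 || (contr - 1) >= MAXCONTR);
        return 0;
}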
index 88d657dff47454a3c374469db0ac51d03c53e502..8b98d53d99764be297362969fc1c88d1865639ec 100644 (file)
@@ -885,7 +885,7 @@ isdn_net_log_skb(struct sk_buff *skb, isdn_net_local *lp)
 
        addinfo[0] = '\0';
        /* This check stolen from 2.1.72 dev_queue_xmit_nit() */
-       if (p < skb->data || skb->network_header >= skb->tail) {
+       if (p < skb->data || skb_network_header(skb) >= skb_tail_pointer(skb)) {
                /* fall back to old isdn_net_log_packet method() */
                char *buf = skb->data;
 
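The switch to skb_network_header()/skb_tail_pointer() is about portability: depending on the kernel configuration, sk_buff stores these fields as offsets from skb->head rather than as raw pointers, and the accessor helpers are the supported way to obtain real pointers for a comparison like the one above. A rough standalone sketch of the offset-based layout (struct buf and its helpers are illustrative, not the real sk_buff API):

#include <stdio.h>

struct buf {
        unsigned char *head;
        unsigned int   tail;            /* stored as an offset from head */
        unsigned int   network_header;  /* stored as an offset from head */
};

/* Accessors normalize the offset form back to pointers. */
static unsigned char *buf_tail_pointer(const struct buf *b)
{
        return b->head + b->tail;
}

static unsigned char *buf_network_header(const struct buf *b)
{
        return b->head + b->network_header;
}

int main(void)
{
        unsigned char data[64];
        struct buf b = { .head = data, .tail = 40, .network_header = 14 };

        printf("header before tail? %d\n",
               buf_network_header(&b) < buf_tail_pointer(&b));
        return 0;
}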
index a0d931bcb37c5cda58766ff365a0a42dd112f45d..b02b679abf3183a7b6d798c212ed21b349453548 100644 (file)
@@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template,
                return 0;
        }
 
+       ret = devm_gpio_request(parent, template->gpio, template->name);
+       if (ret < 0)
+               return ret;
+
        led_dat->cdev.name = template->name;
        led_dat->cdev.default_trigger = template->default_trigger;
        led_dat->gpio = template->gpio;
@@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template,
        if (!template->retain_state_suspended)
                led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
-       ret = devm_gpio_request_one(parent, template->gpio,
-                                   (led_dat->active_low ^ state) ?
-                                   GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
-                                   template->name);
+       ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
        if (ret < 0)
                return ret;
 
index ee14662ed5cef46b845aeeb3557e55dcd6580efb..98cae529373f44aa202a1a5c0f9d2f2259ab0e98 100644 (file)
@@ -47,37 +47,37 @@ static struct ot200_led leds[] = {
        {
                .name = "led_1",
                .port = 0x49,
-               .mask = BIT(7),
+               .mask = BIT(6),
        },
        {
                .name = "led_2",
                .port = 0x49,
-               .mask = BIT(6),
+               .mask = BIT(5),
        },
        {
                .name = "led_3",
                .port = 0x49,
-               .mask = BIT(5),
+               .mask = BIT(4),
        },
        {
                .name = "led_4",
                .port = 0x49,
-               .mask = BIT(4),
+               .mask = BIT(3),
        },
        {
                .name = "led_5",
                .port = 0x49,
-               .mask = BIT(3),
+               .mask = BIT(2),
        },
        {
                .name = "led_6",
                .port = 0x49,
-               .mask = BIT(2),
+               .mask = BIT(1),
        },
        {
                .name = "led_7",
                .port = 0x49,
-               .mask = BIT(1),
+               .mask = BIT(0),
        }
 };
 
index 699187ab380099a32d926a83dde84d96e7124e85..5b9ac32801c7604b25c64804fb888aa1ebbbceab 100644 (file)
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
                        kill_guest(&lg->cpus[0],
                                   "Cannot populate switcher mapping");
                }
+               lg->pgdirs[pgdir].last_host_cpu = -1;
        }
 }
 
index 05c220d05e232454129b6a2db5115b7265e9a213..f950c9d29f3e5400a97b7eb43400d6e2a79b9e19 100644 (file)
@@ -1,7 +1,6 @@
 
 config BCACHE
        tristate "Block device as cache"
-       select CLOSURES
        ---help---
        Allows a block device to be used as cache for other devices; uses
        a btree for indexing and the layout is optimized for SSDs.
index 340146d7c17f999a3efcdf5998901838bbd80ae0..d3e15b42a4ab97655d989c3fe8240b09613acdc4 100644 (file)
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
 struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
-void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_cached_dev_writeback_init(struct cached_dev *);
 void bch_moving_init_cache_set(struct cache_set *);
 
 void bch_cache_allocator_exit(struct cache *ca);
index 64e679449c2ab1db6981b67ba708a43c3dcf4aa4..b8730e714d6930d77eb5ada556b9e4a341d46ab6 100644 (file)
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
 };
 static KTYPE(bch_stats);
 
-static void scale_accounting(unsigned long data);
-
-void bch_cache_accounting_init(struct cache_accounting *acc,
-                              struct closure *parent)
-{
-       kobject_init(&acc->total.kobj,          &bch_stats_ktype);
-       kobject_init(&acc->five_minute.kobj,    &bch_stats_ktype);
-       kobject_init(&acc->hour.kobj,           &bch_stats_ktype);
-       kobject_init(&acc->day.kobj,            &bch_stats_ktype);
-
-       closure_init(&acc->cl, parent);
-       init_timer(&acc->timer);
-       acc->timer.expires      = jiffies + accounting_delay;
-       acc->timer.data         = (unsigned long) acc;
-       acc->timer.function     = scale_accounting;
-       add_timer(&acc->timer);
-}
-
 int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
                                   struct kobject *parent)
 {
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
        atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
        atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
 }
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+                              struct closure *parent)
+{
+       kobject_init(&acc->total.kobj,          &bch_stats_ktype);
+       kobject_init(&acc->five_minute.kobj,    &bch_stats_ktype);
+       kobject_init(&acc->hour.kobj,           &bch_stats_ktype);
+       kobject_init(&acc->day.kobj,            &bch_stats_ktype);
+
+       closure_init(&acc->cl, parent);
+       init_timer(&acc->timer);
+       acc->timer.expires      = jiffies + accounting_delay;
+       acc->timer.data         = (unsigned long) acc;
+       acc->timer.function     = scale_accounting;
+       add_timer(&acc->timer);
+}
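Moving bch_cache_accounting_init() below its callees lets the "static void scale_accounting(unsigned long)" forward declaration go away; the function body itself is unchanged. The same define-before-use idea in miniature (names are illustrative):

#include <stdio.h>

/* Defining the callee first removes the need for a forward
 * declaration in the caller's translation unit. */
static void scale_accounting(void)
{
        puts("scale");
}

static void accounting_init(void)
{
        scale_accounting();
}

int main(void)
{
        accounting_init();
        return 0;
}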
index c8046bc4aa57e0f12df3db3bc2495eea3be97564..f88e2b653a3fc9c82a7b308c8988cd10eda0c96b 100644 (file)
@@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
        return 0;
 }
 
-static int release_dev(struct gendisk *b, fmode_t mode)
+static void release_dev(struct gendisk *b, fmode_t mode)
 {
        struct bcache_device *d = b->private_data;
        closure_put(&d->cl);
-       return 0;
 }
 
 static int ioctl_dev(struct block_device *b, fmode_t mode,
@@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
 
        if (d->c)
                bcache_device_detach(d);
-
-       if (d->disk)
+       if (d->disk && d->disk->flags & GENHD_FL_UP)
                del_gendisk(d->disk);
        if (d->disk && d->disk->queue)
                blk_cleanup_queue(d->disk->queue);
@@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
            !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
                                sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
-           bio_split_pool_init(&d->bio_split_hook))
-
-               return -ENOMEM;
-
-       d->disk = alloc_disk(1);
-       if (!d->disk)
+           bio_split_pool_init(&d->bio_split_hook) ||
+           !(d->disk = alloc_disk(1)) ||
+           !(q = blk_alloc_queue(GFP_KERNEL)))
                return -ENOMEM;
 
        snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
        d->disk->fops           = &bcache_ops;
        d->disk->private_data   = d;
 
-       q = blk_alloc_queue(GFP_KERNEL);
-       if (!q)
-               return -ENOMEM;
-
        blk_queue_make_request(q, NULL);
        d->disk->queue                  = q;
        q->queuedata                    = d;
@@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
 
        mutex_lock(&bch_register_lock);
 
-       bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+       if (atomic_read(&dc->running))
+               bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
        bcache_device_free(&dc->disk);
        list_del(&dc->list);
 
        mutex_unlock(&bch_register_lock);
 
        if (!IS_ERR_OR_NULL(dc->bdev)) {
-               blk_sync_queue(bdev_get_queue(dc->bdev));
+               if (dc->bdev->bd_disk)
+                       blk_sync_queue(bdev_get_queue(dc->bdev));
+
                blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
        }
 
@@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
 
 static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 {
-       int err;
+       int ret;
        struct io *io;
-
-       closure_init(&dc->disk.cl, NULL);
-       set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+       struct request_queue *q = bdev_get_queue(dc->bdev);
 
        __module_get(THIS_MODULE);
        INIT_LIST_HEAD(&dc->list);
+       closure_init(&dc->disk.cl, NULL);
+       set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
-
-       bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
-
-       err = bcache_device_init(&dc->disk, block_size);
-       if (err)
-               goto err;
-
-       spin_lock_init(&dc->io_lock);
-       closure_init_unlocked(&dc->sb_write);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
+       closure_init_unlocked(&dc->sb_write);
+       INIT_LIST_HEAD(&dc->io_lru);
+       spin_lock_init(&dc->io_lock);
+       bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 
        dc->sequential_merge            = true;
        dc->sequential_cutoff           = 4 << 20;
 
-       INIT_LIST_HEAD(&dc->io_lru);
-       dc->sb_bio.bi_max_vecs  = 1;
-       dc->sb_bio.bi_io_vec    = dc->sb_bio.bi_inline_vecs;
-
        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                list_add(&io->lru, &dc->io_lru);
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }
 
-       bch_writeback_init_cached_dev(dc);
+       ret = bcache_device_init(&dc->disk, block_size);
+       if (ret)
+               return ret;
+
+       set_capacity(dc->disk.disk,
+                    dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+
+       dc->disk.disk->queue->backing_dev_info.ra_pages =
+               max(dc->disk.disk->queue->backing_dev_info.ra_pages,
+                   q->backing_dev_info.ra_pages);
+
+       bch_cached_dev_request_init(dc);
+       bch_cached_dev_writeback_init(dc);
        return 0;
-err:
-       bcache_device_stop(&dc->disk);
-       return err;
 }
 
 /* Cached device - bcache superblock */
 
-static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
+static void register_bdev(struct cache_sb *sb, struct page *sb_page,
                                 struct block_device *bdev,
                                 struct cached_dev *dc)
 {
        char name[BDEVNAME_SIZE];
        const char *err = "cannot allocate memory";
-       struct gendisk *g;
        struct cache_set *c;
 
-       if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
-               return err;
-
        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
-       dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;
 
-       g = dc->disk.disk;
-
-       set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
-       g->queue->backing_dev_info.ra_pages =
-               max(g->queue->backing_dev_info.ra_pages,
-                   bdev->bd_queue->backing_dev_info.ra_pages);
+       bio_init(&dc->sb_bio);
+       dc->sb_bio.bi_max_vecs  = 1;
+       dc->sb_bio.bi_io_vec    = dc->sb_bio.bi_inline_vecs;
+       dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+       get_page(sb_page);
 
-       bch_cached_dev_request_init(dc);
+       if (cached_dev_init(dc, sb->block_size << 9))
+               goto err;
 
        err = "error creating kobject";
        if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;
 
+       pr_info("registered backing device %s", bdevname(bdev, name));
+
        list_add(&dc->list, &uncached_devices);
        list_for_each_entry(c, &bch_cache_sets, list)
                bch_cached_dev_attach(dc, c);
@@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
            BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
                bch_cached_dev_run(dc);
 
-       return NULL;
+       return;
 err:
-       kobject_put(&dc->disk.kobj);
        pr_notice("error opening %s: %s", bdevname(bdev, name), err);
-       /*
-        * Return NULL instead of an error because kobject_put() cleans
-        * everything up
-        */
-       return NULL;
+       bcache_device_stop(&dc->disk);
 }
 
 /* Flash only volumes */
@@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
        size_t free;
        struct bucket *b;
 
-       if (!ca)
-               return -ENOMEM;
-
        __module_get(THIS_MODULE);
        kobject_init(&ca->kobj, &bch_cache_ktype);
 
-       memcpy(&ca->sb, sb, sizeof(struct cache_sb));
-
        INIT_LIST_HEAD(&ca->discards);
 
-       bio_init(&ca->sb_bio);
-       ca->sb_bio.bi_max_vecs  = 1;
-       ca->sb_bio.bi_io_vec    = ca->sb_bio.bi_inline_vecs;
-
        bio_init(&ca->journal.bio);
        ca->journal.bio.bi_max_vecs = 8;
        ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
            !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
            !init_fifo(&ca->unused,     free << 2, GFP_KERNEL) ||
            !init_heap(&ca->heap,       free << 3, GFP_KERNEL) ||
-           !(ca->buckets       = vmalloc(sizeof(struct bucket) *
+           !(ca->buckets       = vzalloc(sizeof(struct bucket) *
                                          ca->sb.nbuckets)) ||
            !(ca->prio_buckets  = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
                                          2, GFP_KERNEL)) ||
            !(ca->disk_buckets  = alloc_bucket_pages(GFP_KERNEL, ca)) ||
            !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
            bio_split_pool_init(&ca->bio_split_hook))
-               goto err;
+               return -ENOMEM;
 
        ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
 
-       memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
        for_each_bucket(b, ca)
                atomic_set(&b->pin, 0);
 
@@ -1766,22 +1741,28 @@ err:
        return -ENOMEM;
 }
 
-static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
+static void register_cache(struct cache_sb *sb, struct page *sb_page,
                                  struct block_device *bdev, struct cache *ca)
 {
        char name[BDEVNAME_SIZE];
        const char *err = "cannot allocate memory";
 
-       if (cache_alloc(sb, ca) != 0)
-               return err;
-
-       ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+       memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;
 
+       bio_init(&ca->sb_bio);
+       ca->sb_bio.bi_max_vecs  = 1;
+       ca->sb_bio.bi_io_vec    = ca->sb_bio.bi_inline_vecs;
+       ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+       get_page(sb_page);
+
        if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                ca->discard = CACHE_DISCARD(&ca->sb);
 
+       if (cache_alloc(sb, ca) != 0)
+               goto err;
+
        err = "error creating kobject";
        if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
                goto err;
@@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
                goto err;
 
        pr_info("registered cache device %s", bdevname(bdev, name));
-
-       return NULL;
+       return;
 err:
+       pr_notice("error opening %s: %s", bdevname(bdev, name), err);
        kobject_put(&ca->kobj);
-       pr_info("error opening %s: %s", bdevname(bdev, name), err);
-       /* Return NULL instead of an error because kobject_put() cleans
-        * everything up
-        */
-       return NULL;
 }
 
 /* Global interfaces/init */
@@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
        bdev = blkdev_get_by_path(strim(path),
                                  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                  sb);
-       if (bdev == ERR_PTR(-EBUSY))
-               err = "device busy";
-
-       if (IS_ERR(bdev) ||
-           set_blocksize(bdev, 4096))
+       if (IS_ERR(bdev)) {
+               if (bdev == ERR_PTR(-EBUSY))
+                       err = "device busy";
                goto err;
+       }
+
+       err = "failed to set blocksize";
+       if (set_blocksize(bdev, 4096))
+               goto err_close;
 
        err = read_super(sb, bdev, &sb_page);
        if (err)
@@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 
        if (SB_IS_BDEV(sb)) {
                struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+               if (!dc)
+                       goto err_close;
 
-               err = register_bdev(sb, sb_page, bdev, dc);
+               register_bdev(sb, sb_page, bdev, dc);
        } else {
                struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+               if (!ca)
+                       goto err_close;
 
-               err = register_cache(sb, sb_page, bdev, ca);
+               register_cache(sb, sb_page, bdev, ca);
        }
-
-       if (err) {
-               /* register_(bdev|cache) will only return an error if they
-                * didn't get far enough to create the kobject - if they did,
-                * the kobject destructor will do this cleanup.
-                */
+out:
+       if (sb_page)
                put_page(sb_page);
-err_close:
-               blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
-               if (attr != &ksysfs_register_quiet)
-                       pr_info("error opening %s: %s", path, err);
-               ret = -EINVAL;
-       }
-
        kfree(sb);
        kfree(path);
        mutex_unlock(&bch_register_lock);
        module_put(THIS_MODULE);
        return ret;
+
+err_close:
+       blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+err:
+       if (attr != &ksysfs_register_quiet)
+               pr_info("error opening %s: %s", path, err);
+       ret = -EINVAL;
+       goto out;
 }
 
 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
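register_bcache() now follows the usual label-per-resource unwind: err_close releases the block device taken by blkdev_get_by_path(), err only logs and sets the error, and both fall through to the shared out label that frees what every path allocated. A minimal sketch of that pattern with made-up resources:

#include <stdio.h>
#include <stdlib.h>

/* Each failure jumps to the label that undoes only what was already
 * acquired; the success path returns before the unwind labels. */
static int setup(char **out_a, char **out_b)
{
        char *a, *b;

        a = malloc(32);
        if (!a)
                goto err;

        b = malloc(64);
        if (!b)
                goto err_free_a;

        *out_a = a;
        *out_b = b;
        return 0;

err_free_a:
        free(a);
err:
        return -1;
}

int main(void)
{
        char *a = NULL, *b = NULL;

        printf("setup() = %d\n", setup(&a, &b));
        free(b);
        free(a);
        return 0;
}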
index 93e7e31a4bd34d97f1a5e9fbc5c9082b2b77ce57..2714ed3991d1b747518aeb22d70222d1653d5e40 100644 (file)
@@ -375,7 +375,7 @@ err:
        refill_dirty(cl);
 }
 
-void bch_writeback_init_cached_dev(struct cached_dev *dc)
+void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
        closure_init_unlocked(&dc->writeback);
        init_rwsem(&dc->writeback_lock);
index 759cffc45cabce8414c883f329b52fd56513bccf..88f2f802d528be23b8e64c26085913677082be03 100644 (file)
@@ -2188,7 +2188,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
 
        *need_commit = false;
 
-       metadata_dev_size = get_metadata_dev_size(pool->md_dev);
+       metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
 
        r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
        if (r) {
@@ -2197,7 +2197,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
        }
 
        if (metadata_dev_size < sb_metadata_dev_size) {
-               DMERR("metadata device (%llu sectors) too small: expected %llu",
+               DMERR("metadata device (%llu blocks) too small: expected %llu",
                      metadata_dev_size, sb_metadata_dev_size);
                return -EINVAL;
 
index 681d1099a2d58936864b3b63610a31f38a908219..9b82377a833bd6572b628c79426ca153781fd712 100644 (file)
@@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
 
 static void __md_stop_writes(struct mddev *mddev)
 {
+       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        if (mddev->sync_thread) {
-               set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                md_reap_sync_thread(mddev);
        }
index 55951182af73680d3b7f40d32cac1302062dbe74..6e17f8181c4b923eb4044838d5bfc9dbb6d625fd 100644 (file)
@@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
 
                r1_bio->bios[mirror] = NULL;
                to_put = bio;
-               set_bit(R1BIO_Uptodate, &r1_bio->state);
+               /*
+                * Do not set R1BIO_Uptodate if the current device is
+                * rebuilding or Faulty. This is because we cannot use
+                * such a device for properly reading the data back (we could
+                * potentially use it, if the current write fell before
+                * rdev->recovery_offset, but for simplicity we don't
+                * check this here).
+                */
+               if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
+                   !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
+                       set_bit(R1BIO_Uptodate, &r1_bio->state);
 
                /* Maybe we can clear some bad blocks. */
                if (is_badblock(conf->mirrors[mirror].rdev,
@@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
        wake_up(&conf->wait_barrier);
 }
 
-static void freeze_array(struct r1conf *conf)
+static void freeze_array(struct r1conf *conf, int extra)
 {
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
-        * wait until nr_pending match nr_queued+1
+        * wait until nr_pending match nr_queued+extra
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
-        * Thus the number queued (nr_queued) plus this request (1)
+        * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
@@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq_cmd(conf->wait_barrier,
-                               conf->nr_pending == conf->nr_queued+1,
+                               conf->nr_pending == conf->nr_queued+extra,
                                conf->resync_lock,
                                flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
@@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                 * we wait for all outstanding requests to complete.
                 */
                synchronize_sched();
-               raise_barrier(conf);
-               lower_barrier(conf);
+               freeze_array(conf, 0);
+               unfreeze_array(conf);
                clear_bit(Unmerged, &rdev->flags);
        }
        md_integrity_add_rdev(rdev, mddev);
@@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
                         */
                        struct md_rdev *repl =
                                conf->mirrors[conf->raid_disks + number].rdev;
-                       raise_barrier(conf);
+                       freeze_array(conf, 0);
                        clear_bit(Replacement, &repl->flags);
                        p->rdev = repl;
                        conf->mirrors[conf->raid_disks + number].rdev = NULL;
-                       lower_barrier(conf);
+                       unfreeze_array(conf);
                        clear_bit(WantReplacement, &rdev->flags);
                } else
                        clear_bit(WantReplacement, &rdev->flags);
@@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
         * frozen
         */
        if (mddev->ro == 0) {
-               freeze_array(conf);
+               freeze_array(conf, 1);
                fix_read_error(conf, r1_bio->read_disk,
                               r1_bio->sector, r1_bio->sectors);
                unfreeze_array(conf);
@@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
                return PTR_ERR(conf);
 
        if (mddev->queue)
-               blk_queue_max_write_same_sectors(mddev->queue,
-                                                mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue, 0);
+
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
@@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
                return -ENOMEM;
        }
 
-       raise_barrier(conf);
+       freeze_array(conf, 0);
 
        /* ok, everything is stopped */
        oldpool = conf->r1bio_pool;
@@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
        conf->raid_disks = mddev->raid_disks = raid_disks;
        mddev->delta_disks = 0;
 
-       lower_barrier(conf);
+       unfreeze_array(conf);
 
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
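freeze_array() now takes an extra argument describing how many pending requests the caller itself accounts for, which lets it replace raise_barrier()/lower_barrier() pairs in paths that hold no request of their own (extra = 0) as well as the original failed-request path (extra = 1). A toy illustration of the counting condition described in the comment above (the numbers are made up):

#include <stdio.h>

/* The array is quiescent when every pending request is accounted for
 * either on the retry queue or by the caller itself ("extra"). */
static int frozen(int nr_pending, int nr_queued, int extra)
{
        return nr_pending == nr_queued + extra;
}

int main(void)
{
        /* Called from a failed request: that request is still pending
         * but will never be queued, so it is counted via extra = 1. */
        printf("from failed bio: %d\n", frozen(3, 2, 1));

        /* Called with no request of our own: everything pending must
         * already be on the retry queue. */
        printf("no own request:  %d\n", frozen(2, 2, 0));
        return 0;
}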
index 59d4daa5f4c7a32c245ef954f24650fe75084117..6ddae2501b9ae0fb2eb7119a4bf5467d9b6a90b2 100644 (file)
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
                sector_t first_bad;
                int bad_sectors;
 
-               set_bit(R10BIO_Uptodate, &r10_bio->state);
+               /*
+                * Do not set R10BIO_Uptodate if the current device is
+                * rebuilding or Faulty. This is because we cannot use
+                * such a device for properly reading the data back (we could
+                * potentially use it, if the current write fell before
+                * rdev->recovery_offset, but for simplicity we don't
+                * check this here).
+                */
+               if (test_bit(In_sync, &rdev->flags) &&
+                   !test_bit(Faulty, &rdev->flags))
+                       set_bit(R10BIO_Uptodate, &r10_bio->state);
 
                /* Maybe we can clear some bad blocks. */
                if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
        wake_up(&conf->wait_barrier);
 }
 
-static void freeze_array(struct r10conf *conf)
+static void freeze_array(struct r10conf *conf, int extra)
 {
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
-        * wait until nr_pending match nr_queued+1
+        * wait until nr_pending match nr_queued+extra
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
-        * Thus the number queued (nr_queued) plus this request (1)
+        * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq_cmd(conf->wait_barrier,
-                               conf->nr_pending == conf->nr_queued+1,
+                               conf->nr_pending == conf->nr_queued+extra,
                                conf->resync_lock,
                                flush_pending_writes(conf));
 
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                 * we wait for all outstanding requests to complete.
                 */
                synchronize_sched();
-               raise_barrier(conf, 0);
-               lower_barrier(conf);
+               freeze_array(conf, 0);
+               unfreeze_array(conf);
                clear_bit(Unmerged, &rdev->flags);
        }
        md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
        r10_bio->devs[slot].bio = NULL;
 
        if (mddev->ro == 0) {
-               freeze_array(conf);
+               freeze_array(conf, 1);
                fix_read_error(conf, mddev, r10_bio);
                unfreeze_array(conf);
        } else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
        if (mddev->queue) {
                blk_queue_max_discard_sectors(mddev->queue,
                                              mddev->chunk_sectors);
-               blk_queue_max_write_same_sectors(mddev->queue,
-                                                mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue, 0);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
index 9359828ffe264d3313ee77de993ea4c5147f1205..05e4a105b9c706bb91bf4490ef2ce5f724601477 100644 (file)
@@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                                bi->bi_rw |= REQ_FLUSH;
 
+                       bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
@@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        else
                                rbi->bi_sector = (sh->sector
                                                  + rrdev->data_offset);
+                       rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
@@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
                if (mddev->major_version == 0 &&
                    mddev->minor_version > 90)
                        rdev->recovery_offset = reshape_offset;
-                       
+
                if (rdev->recovery_offset < reshape_offset) {
                        /* We need to check old and new layout */
                        if (!only_parity(rdev->raid_disk,
@@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
                 */
                mddev->queue->limits.discard_zeroes_data = 0;
 
+               blk_queue_max_write_same_sectors(mddev->queue, 0);
+
                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
index ca2754a3cd63d83eefb8927ae072f6732e852716..5e040085c2ffe4b6550db7a029db359f37765dda 100644 (file)
@@ -176,7 +176,7 @@ struct zoran_fh;
 
 struct zoran_mapping {
        struct zoran_fh *fh;
-       int count;
+       atomic_t count;
 };
 
 struct zoran_buffer {
index 1168a84a737dafae1dbf6e7e3bbe195b891815d6..d133c30c3fdccbc21fb54b3f08b66966835d3565 100644 (file)
@@ -2803,8 +2803,7 @@ static void
 zoran_vm_open (struct vm_area_struct *vma)
 {
        struct zoran_mapping *map = vma->vm_private_data;
-
-       map->count++;
+       atomic_inc(&map->count);
 }
 
 static void
@@ -2815,7 +2814,7 @@ zoran_vm_close (struct vm_area_struct *vma)
        struct zoran *zr = fh->zr;
        int i;
 
-       if (--map->count > 0)
+       if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock))
                return;
 
        dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
@@ -2828,14 +2827,16 @@ zoran_vm_close (struct vm_area_struct *vma)
        kfree(map);
 
        /* Any buffers still mapped? */
-       for (i = 0; i < fh->buffers.num_buffers; i++)
-               if (fh->buffers.buffer[i].map)
+       for (i = 0; i < fh->buffers.num_buffers; i++) {
+               if (fh->buffers.buffer[i].map) {
+                       mutex_unlock(&zr->resource_lock);
                        return;
+               }
+       }
 
        dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
                __func__, mode_name(fh->map_mode));
 
-       mutex_lock(&zr->resource_lock);
 
        if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
                if (fh->buffers.active != ZORAN_FREE) {
@@ -2939,7 +2940,7 @@ zoran_mmap (struct file           *file,
                goto mmap_unlock_and_return;
        }
        map->fh = fh;
-       map->count = 1;
+       atomic_set(&map->count, 1);
 
        vma->vm_ops = &zoran_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND;
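The mapping refcount becomes an atomic_t, and the last-unmap path uses atomic_dec_and_mutex_lock() so the buffer teardown runs with zr->resource_lock already held instead of taking it after the check. A simplified userspace analogue of that dec-and-lock shape (it ignores the 1-to-0 race handling of the kernel helper, so treat it as a sketch only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int count = 2;	/* pretend two mappings exist */

/* Decrement the counter; only when it reaches zero, return true with
 * the mutex held so the caller can free the shared state. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *m)
{
        if (atomic_fetch_sub(cnt, 1) != 1)
                return false;
        pthread_mutex_lock(m);
        return true;
}

int main(void)
{
        if (!dec_and_lock(&count, &lock))
                printf("still mapped, nothing to do\n");

        if (dec_and_lock(&count, &lock)) {
                printf("last unmap: freeing buffers under the lock\n");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}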
index 477268a2415fd06d58e47eddc5e157b4940fd8c3..d338b19da544c92e8e1c73c66092ca96575bcb59 100644 (file)
@@ -2150,6 +2150,9 @@ static int __init omap_vout_probe(struct platform_device *pdev)
        struct omap_dss_device *def_display;
        struct omap2video_device *vid_dev = NULL;
 
+       if (omapdss_is_initialized() == false)
+               return -EPROBE_DEFER;
+
        ret = omapdss_compat_init();
        if (ret) {
                dev_err(&pdev->dev, "failed to init dss\n");
index cadf1cc19aafb918d03665815caffb0c11839155..04644e7b42b123ca5407840d11a9e412a26dc6dd 100644 (file)
@@ -1560,12 +1560,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, emif);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(emif->dev, "%s: error getting memory resource\n",
-                       __func__);
-               goto error;
-       }
-
        emif->base = devm_ioremap_resource(emif->dev, res);
        if (IS_ERR(emif->base))
                goto error;
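The explicit "if (!res)" check can go because devm_ioremap_resource() validates the resource itself and reports failure through an ERR_PTR value that the existing IS_ERR() test already catches. A loose sketch of that error-pointer convention (the encoding and helper names below are illustrative, not the kernel implementation):

#include <errno.h>
#include <stdio.h>

/* Toy error-pointer convention: a small negative errno is encoded at
 * the top of the address space so one return value can carry either a
 * valid mapping or an error code. */
static void *err_ptr(long err)
{
        return (void *)err;
}

static int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095L;
}

static char fake_mapping[16];

/* Stand-in for a helper that validates its own input, so callers do
 * not need a separate "resource missing" check. */
static void *map_resource(const void *res)
{
        if (!res)
                return err_ptr(-EINVAL);
        return fake_mapping;
}

int main(void)
{
        void *base = map_resource(NULL);

        printf("mapping failed: %d\n", is_err(base));
        return 0;
}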
index d9aed1593e5d34855b488a89750da5ed68f1a0fe..d54e985748b78403956a0b7ba0d2b8634b949244 100644 (file)
@@ -579,7 +579,7 @@ config AB8500_CORE
 
 config AB8500_DEBUG
        bool "Enable debug info via debugfs"
-       depends on AB8500_CORE && DEBUG_FS
+       depends on AB8500_GPADC && DEBUG_FS
        default y if DEBUG_FS
        help
          Select this option if you want debug information using the debug
@@ -818,6 +818,7 @@ config MFD_TPS65910
 config MFD_TPS65912
        bool "TI TPS65912 Power Management chip"
        depends on GPIOLIB
+       select MFD_CORE
        help
          If you say yes here you get support for the TPS65912 series of
          PM chips.
index 8e8a016effe95251cca5692fed6c7b4442139630..258b367e39891468eb4dd0a9dabf05558a75446d 100644 (file)
@@ -867,6 +867,15 @@ static struct resource ab8500_chargalg_resources[] = {};
 
 #ifdef CONFIG_DEBUG_FS
 static struct resource ab8500_debug_resources[] = {
+       {
+               .name   = "IRQ_AB8500",
+               /*
+                * Number will be filled in. NOTE: this is deliberately
+                * not flagged as an IRQ in order to avoid remapping using
+                * the irqdomain in the MFD core, so that this IRQ passes
+                * unremapped to the debug code.
+                */
+       },
        {
                .name   = "IRQ_FIRST",
                .start  = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -1051,6 +1060,7 @@ static struct mfd_cell ab8500_devs[] = {
        },
        {
                .name = "ab8500-gpadc",
+               .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
                .resources = ab8500_gpadc_resources,
        },
@@ -1097,7 +1107,7 @@ static struct mfd_cell ab8500_devs[] = {
                .of_compatible = "stericsson,ab8500-denc",
        },
        {
-               .name = "ab8500-gpio",
+               .name = "pinctrl-ab8500",
                .of_compatible = "stericsson,ab8500-gpio",
        },
        {
@@ -1208,6 +1218,7 @@ static struct mfd_cell ab8505_devs[] = {
        },
        {
                .name = "ab8500-gpadc",
+               .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
                .resources = ab8505_gpadc_resources,
        },
@@ -1234,7 +1245,7 @@ static struct mfd_cell ab8505_devs[] = {
                .name = "ab8500-leds",
        },
        {
-               .name = "ab8500-gpio",
+               .name = "pinctrl-ab8505",
        },
        {
                .name = "ab8500-usb",
@@ -1271,6 +1282,7 @@ static struct mfd_cell ab8540_devs[] = {
        },
        {
                .name = "ab8500-gpadc",
+               .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
                .resources = ab8505_gpadc_resources,
        },
@@ -1302,7 +1314,7 @@ static struct mfd_cell ab8540_devs[] = {
                .resources = ab8500_temp_resources,
        },
        {
-               .name = "ab8500-gpio",
+               .name = "pinctrl-ab8540",
        },
        {
                .name = "ab8540-usb",
@@ -1712,6 +1724,12 @@ static int ab8500_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+#ifdef CONFIG_DEBUG_FS
+       /* Pass to debugfs */
+       ab8500_debug_resources[0].start = ab8500->irq;
+       ab8500_debug_resources[0].end = ab8500->irq;
+#endif
+
        if (is_ab9540(ab8500))
                ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
                                ARRAY_SIZE(ab9540_devs), NULL,
index b88bbbc15f1e962f8f131d0f50bc25768091f55c..37b7ce4c7c3be57b4c8ea30474ec1edee47f8ad5 100644 (file)
 #include <linux/ctype.h>
 #endif
 
-/* TODO: this file should not reference IRQ_DB8500_AB8500! */
-#include <mach/irqs.h>
-
 static u32 debug_bank;
 static u32 debug_address;
 
+static int irq_ab8500;
 static int irq_first;
 static int irq_last;
 static u32 *irq_count;
@@ -1589,7 +1587,7 @@ void ab8500_debug_register_interrupt(int line)
 {
        if (line < num_interrupt_lines) {
                num_interrupts[line]++;
-               if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500))
+               if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500))
                        num_wake_interrupts[line]++;
        }
 }
@@ -2941,6 +2939,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
        struct dentry *file;
        int ret = -ENOMEM;
        struct ab8500 *ab8500;
+       struct resource *res;
        debug_bank = AB8500_MISC;
        debug_address = AB8500_REV_REG & 0x00FF;
 
@@ -2959,6 +2958,15 @@ static int ab8500_debug_probe(struct platform_device *plf)
        if (!event_name)
                goto out_freedev_attr;
 
+       res = platform_get_resource_byname(plf, 0, "IRQ_AB8500");
+       if (!res) {
+               dev_err(&plf->dev, "AB8500 irq not found, err %d\n",
+                       irq_first);
+               ret = -ENXIO;
+               goto out_freeevent_name;
+       }
+       irq_ab8500 = res->start;
+
        irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
        if (irq_first < 0) {
                dev_err(&plf->dev, "First irq not found, err %d\n",
index 5e65b28a5d0901e1dc5058c084dc444cd2c4a1bd..13f7866de46eb21a4cdf0363f0386d315f8171dd 100644 (file)
@@ -907,14 +907,17 @@ static int ab8500_gpadc_suspend(struct device *dev)
 static int ab8500_gpadc_resume(struct device *dev)
 {
        struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+       int ret;
 
-       regulator_enable(gpadc->regu);
+       ret = regulator_enable(gpadc->regu);
+       if (ret)
+               dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
 
        pm_runtime_mark_last_busy(gpadc->dev);
        pm_runtime_put_autosuspend(gpadc->dev);
 
        mutex_unlock(&gpadc->ab8500_gpadc_lock);
-       return 0;
+       return ret;
 }
 
 static int ab8500_gpadc_probe(struct platform_device *pdev)
index fbca1ced49faac60bfffdef6ff6e19474475ff7b..8e0dae59844d494cd4e2c79904779b8f0a3c0acf 100644 (file)
@@ -23,7 +23,7 @@
 
 static struct device *sysctrl_dev;
 
-void ab8500_power_off(void)
+static void ab8500_power_off(void)
 {
        sigset_t old;
        sigset_t all;
@@ -104,7 +104,7 @@ void ab8500_restart(char mode, const char *cmd)
 
        plat = dev_get_platdata(sysctrl_dev->parent);
        pdata = plat->sysctrl;
-       if (pdata->reboot_reason_code)
+       if (pdata && pdata->reboot_reason_code)
                reason = pdata->reboot_reason_code(cmd);
        else
                pr_warn("[%s] No reboot reason set. Default reason %d\n",
@@ -188,14 +188,15 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
 
        plat = dev_get_platdata(pdev->dev.parent);
 
-       if (!(plat && plat->sysctrl))
+       if (!plat)
                return -EINVAL;
 
-       if (plat->pm_power_off)
+       sysctrl_dev = &pdev->dev;
+
+       if (!pm_power_off)
                pm_power_off = ab8500_power_off;
 
        pdata = plat->sysctrl;
-
        if (pdata) {
                int last, ret, i, j;
 
@@ -226,6 +227,10 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
 static int ab8500_sysctrl_remove(struct platform_device *pdev)
 {
        sysctrl_dev = NULL;
+
+       if (pm_power_off == ab8500_power_off)
+               pm_power_off = NULL;
+
        return 0;
 }
 
index 9818afba25153b377cf2b64b54b18bccc02f3e2c..3714acb6145864611cd29fd314c7bfd9d0af5bfb 100644 (file)
@@ -156,7 +156,7 @@ EXPORT_SYMBOL(abx500_startup_irq_enabled);
 void abx500_dump_all_banks(void)
 {
        struct abx500_ops *ops;
-       struct device dummy_child = {0};
+       struct device dummy_child = {NULL};
        struct abx500_device_entry *dev_entry;
 
        list_for_each_entry(dev_entry, &abx500_list, list) {
index 19193cf1e7a1bf9075a4eb63e157b6a419eba14f..367ccb58ecb15a1954cf39e936c46bc5a4bf3498 100644 (file)
@@ -120,7 +120,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
 
                for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
                        if (*ptr == EC_MSG_HEADER) {
-                               dev_dbg(ec_dev->dev, "msg found at %ld\n",
+                               dev_dbg(ec_dev->dev, "msg found at %zd\n",
                                        ptr - ec_dev->din);
                                break;
                        }
@@ -154,7 +154,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
                 * maximum-supported transfer size.
                 */
                todo = min(need_len, 256);
-               dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n",
+               dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
                        todo, need_len, ptr - ec_dev->din);
 
                memset(&trans, '\0', sizeof(trans));
@@ -178,7 +178,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
                need_len -= todo;
        }
 
-       dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din);
+       dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);
 
        return 0;
 }
index 319b8abe742b4ef0d4bf5b74e300514e5c031c91..66f80973596bd8833557d13c1bfc5426a6b1290d 100644 (file)
@@ -1613,6 +1613,8 @@ static unsigned long dsiclk_rate(u8 n)
 
        if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
                divsel = dsiclk[n].divsel;
+       else
+               dsiclk[n].divsel = divsel;
 
        switch (divsel) {
        case PRCM_DSI_PLLOUT_SEL_PHI_4:
@@ -3095,6 +3097,7 @@ static struct mfd_cell db8500_prcmu_devs[] = {
                .num_resources = ARRAY_SIZE(db8500_thsens_resources),
                .resources = db8500_thsens_resources,
                .platform_data = &db8500_thsens_data,
+               .pdata_size = sizeof(db8500_thsens_data),
        },
 };
 
index 5be3b5e13855045f27c4697da69710ec324e74e9..d8d5137f9717214f70b235c15e51215ac684807b 100644 (file)
@@ -414,11 +414,6 @@ static int intel_msic_probe(struct platform_device *pdev)
         * the clients via intel_msic_irq_read().
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
-               return -ENODEV;
-       }
-
        msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(msic->irq_base))
                return PTR_ERR(msic->irq_base);
index de48b4e884501b7cce5540f87f9144c0a8b60742..6f1ef63086c9df98c56c00369c7b91b60c5320c5 100644 (file)
@@ -29,6 +29,8 @@
 
 #include <linux/mfd/si476x-core.h>
 
+#include <asm/unaligned.h>
+
 #define msb(x)                  ((u8)((u16) x >> 8))
 #define lsb(x)                  ((u8)((u16) x &  0x00FF))
 
@@ -150,7 +152,7 @@ enum si476x_acf_status_report_bits {
        SI476X_ACF_SOFTMUTE_INT = (1 << 0),
 
        SI476X_ACF_SMUTE        = (1 << 0),
-       SI476X_ACF_SMATTN       = 0b11111,
+       SI476X_ACF_SMATTN       = 0x1f,
        SI476X_ACF_PILOT        = (1 << 7),
        SI476X_ACF_STBLEND      = ~SI476X_ACF_PILOT,
 };
@@ -483,7 +485,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property)
        if (err < 0)
                return err;
        else
-               return be16_to_cpup((__be16 *)(resp + 2));
+               return get_unaligned_be16(resp + 2);
 }
 EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
 
@@ -772,18 +774,18 @@ int si476x_core_cmd_am_rsq_status(struct si476x_core *core,
        if (!report)
                return err;
 
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
 
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
 
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -931,26 +933,26 @@ int si476x_core_cmd_fm_rds_status(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->rdstpptyint     = 0b00010000 & resp[1];
-       report->rdspiint        = 0b00001000 & resp[1];
-       report->rdssyncint      = 0b00000010 & resp[1];
-       report->rdsfifoint      = 0b00000001 & resp[1];
+       report->rdstpptyint     = 0x10 & resp[1];
+       report->rdspiint        = 0x08 & resp[1];
+       report->rdssyncint      = 0x02 & resp[1];
+       report->rdsfifoint      = 0x01 & resp[1];
 
-       report->tpptyvalid      = 0b00010000 & resp[2];
-       report->pivalid         = 0b00001000 & resp[2];
-       report->rdssync         = 0b00000010 & resp[2];
-       report->rdsfifolost     = 0b00000001 & resp[2];
+       report->tpptyvalid      = 0x10 & resp[2];
+       report->pivalid         = 0x08 & resp[2];
+       report->rdssync         = 0x02 & resp[2];
+       report->rdsfifolost     = 0x01 & resp[2];
 
-       report->tp              = 0b00100000 & resp[3];
-       report->pty             = 0b00011111 & resp[3];
+       report->tp              = 0x20 & resp[3];
+       report->pty             = 0x1f & resp[3];
 
-       report->pi              = be16_to_cpup((__be16 *)(resp + 4));
+       report->pi              = get_unaligned_be16(resp + 4);
        report->rdsfifoused     = resp[6];
 
-       report->ble[V4L2_RDS_BLOCK_A]   = 0b11000000 & resp[7];
-       report->ble[V4L2_RDS_BLOCK_B]   = 0b00110000 & resp[7];
-       report->ble[V4L2_RDS_BLOCK_C]   = 0b00001100 & resp[7];
-       report->ble[V4L2_RDS_BLOCK_D]   = 0b00000011 & resp[7];
+       report->ble[V4L2_RDS_BLOCK_A]   = 0xc0 & resp[7];
+       report->ble[V4L2_RDS_BLOCK_B]   = 0x30 & resp[7];
+       report->ble[V4L2_RDS_BLOCK_C]   = 0x0c & resp[7];
+       report->ble[V4L2_RDS_BLOCK_D]   = 0x03 & resp[7];
 
        report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
        report->rds[V4L2_RDS_BLOCK_A].msb = resp[8];
@@ -991,9 +993,9 @@ int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core,
                                       SI476X_DEFAULT_TIMEOUT);
 
        if (!err) {
-               report->expected        = be16_to_cpup((__be16 *)(resp + 2));
-               report->received        = be16_to_cpup((__be16 *)(resp + 4));
-               report->uncorrectable   = be16_to_cpup((__be16 *)(resp + 6));
+               report->expected        = get_unaligned_be16(resp + 2);
+               report->received        = get_unaligned_be16(resp + 4);
+               report->uncorrectable   = get_unaligned_be16(resp + 6);
        }
 
        return err;
@@ -1005,7 +1007,7 @@ int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core,
 {
        u8       resp[CMD_FM_PHASE_DIVERSITY_NRESP];
        const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = {
-               mode & 0b111,
+               mode & 0x07,
        };
 
        return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY,
@@ -1162,7 +1164,7 @@ static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core,
        const int am_freq = tuneargs->freq;
        u8       resp[CMD_AM_TUNE_FREQ_NRESP];
        const u8 args[CMD_AM_TUNE_FREQ_NARGS] = {
-               (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11),
+               (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03),
                msb(am_freq),
                lsb(am_freq),
        };
@@ -1197,20 +1199,20 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->multhint        = 0b10000000 & resp[1];
-       report->multlint        = 0b01000000 & resp[1];
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
+       report->multhint        = 0x80 & resp[1];
+       report->multlint        = 0x40 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
 
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
 
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -1218,7 +1220,7 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
        report->hassi           = resp[10];
        report->mult            = resp[11];
        report->dev             = resp[12];
-       report->readantcap      = be16_to_cpup((__be16 *)(resp + 13));
+       report->readantcap      = get_unaligned_be16(resp + 13);
        report->assi            = resp[15];
        report->usn             = resp[16];
 
@@ -1251,20 +1253,20 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->multhint        = 0b10000000 & resp[1];
-       report->multlint        = 0b01000000 & resp[1];
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
+       report->multhint        = 0x80 & resp[1];
+       report->multlint        = 0x40 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
 
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
 
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -1272,7 +1274,7 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
        report->hassi           = resp[10];
        report->mult            = resp[11];
        report->dev             = resp[12];
-       report->readantcap      = be16_to_cpup((__be16 *)(resp + 13));
+       report->readantcap      = get_unaligned_be16(resp + 13);
        report->assi            = resp[15];
        report->usn             = resp[16];
 
@@ -1306,21 +1308,21 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->multhint        = 0b10000000 & resp[1];
-       report->multlint        = 0b01000000 & resp[1];
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
-
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->injside         = 0b00000100 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
-
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->multhint        = 0x80 & resp[1];
+       report->multlint        = 0x40 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
+
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->injside         = 0x04 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
+
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -1329,7 +1331,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
        report->hassi           = resp[10];
        report->mult            = resp[11];
        report->dev             = resp[12];
-       report->readantcap      = be16_to_cpup((__be16 *)(resp + 13));
+       report->readantcap      = get_unaligned_be16(resp + 13);
        report->assi            = resp[15];
        report->usn             = resp[16];
 
@@ -1337,7 +1339,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
        report->rdsdev          = resp[18];
        report->assidev         = resp[19];
        report->strongdev       = resp[20];
-       report->rdspi           = be16_to_cpup((__be16 *)(resp + 21));
+       report->rdspi           = get_unaligned_be16(resp + 21);
 
        return err;
 }
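
The switch from be16_to_cpup((__be16 *)(resp + 3)) to get_unaligned_be16(resp + 3) matters because the 16-bit fields sit at odd offsets inside the response buffer, so casting to a __be16 pointer produces an unaligned load on strict-alignment architectures. A minimal userspace sketch of the equivalent byte-wise extraction; be16_from_bytes and the example buffer contents are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assemble a big-endian u16 from two consecutive bytes without requiring
 * any particular alignment -- the same effect as get_unaligned_be16(). */
static uint16_t be16_from_bytes(const uint8_t *p)
{
        return (uint16_t)(p[0] << 8) | p[1];
}

int main(void)
{
        /* fake command response; bytes 3..4 hold a big-endian field */
        uint8_t resp[17] = { 0 };

        resp[3] = 0x27;         /* arbitrary example value: 0x2706 == 9990 */
        resp[4] = 0x06;

        printf("readfreq = %u\n", be16_from_bytes(resp + 3));
        return 0;
}
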
index c09c28f92055b156245305da692e1c98582839cc..1abd5ad599251cc655672f656c2718bf65c49aa2 100644 (file)
@@ -154,11 +154,6 @@ static int ssc_probe(struct platform_device *pdev)
        ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_dbg(&pdev->dev, "no mmio resource defined\n");
-               return -ENXIO;
-       }
-
        ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(ssc->regs))
                return PTR_ERR(ssc->regs);
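
The dropped NULL check is redundant because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR (with its own dev_err() diagnostic) when it is missing. A kernel-context sketch of the resulting idiom; example_probe is an illustrative name, not code from this driver:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* devm_ioremap_resource() rejects a NULL resource on its own,
         * so no separate "if (!res)" check is needed before the call. */
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        return 0;
}
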
index 7014167e2c619f23acc05398973552fd8ab23f9e..c37eeedfe215634775a59fa581d83f749a3fb536 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 
-static int irq;
+static int irq = -1;
 
 static irqreturn_t dummy_interrupt(int irq, void *dev_id)
 {
@@ -36,6 +36,10 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id)
 
 static int __init dummy_irq_init(void)
 {
+       if (irq < 0) {
+               printk(KERN_ERR "dummy-irq: no IRQ given.  Use irq=N\n");
+               return -EIO;
+       }
        if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) {
                printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq);
                return -EIO;
index 1e935eacaa7faee9903e20303dc703c75ed014ff..9ecd49a7be1b33cac4ece186e35e3ebc255d9d78 100644 (file)
@@ -496,6 +496,8 @@ int mei_cl_disable_device(struct mei_cl_device *device)
                }
        }
 
+       device->event_cb = NULL;
+
        mutex_unlock(&dev->device_lock);
 
        if (!device->ops || !device->ops->disable)
index 713d89fedc46ab03caddac5cbc56ec7dc3dcafc8..f580d30bb7842c564e81dba6b4406b1b01bfdee1 100644 (file)
@@ -197,6 +197,8 @@ void mei_stop(struct mei_device *dev)
 {
        dev_dbg(&dev->pdev->dev, "stopping the device.\n");
 
+       flush_scheduled_work();
+
        mutex_lock(&dev->device_lock);
 
        cancel_delayed_work(&dev->timer_work);
@@ -210,8 +212,6 @@ void mei_stop(struct mei_device *dev)
 
        mutex_unlock(&dev->device_lock);
 
-       flush_scheduled_work();
-
        mei_watchdog_unregister(dev);
 }
 EXPORT_SYMBOL_GPL(mei_stop);
index 7c44c8dbae424904c53e041f5723ab51bf974e6f..053139f610861fa98c6778176d86f5a4969660f7 100644 (file)
@@ -489,11 +489,16 @@ static int mei_ioctl_connect_client(struct file *file,
 
        /* find ME client we're trying to connect to */
        i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
-       if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
-               cl->me_client_id = dev->me_clients[i].client_id;
-               cl->state = MEI_FILE_CONNECTING;
+       if (i < 0 || dev->me_clients[i].props.fixed_address) {
+               dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
+                               &data->in_client_uuid);
+               rets = -ENODEV;
+               goto end;
        }
 
+       cl->me_client_id = dev->me_clients[i].client_id;
+       cl->state = MEI_FILE_CONNECTING;
+
        dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
                        cl->me_client_id);
        dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
@@ -527,11 +532,6 @@ static int mei_ioctl_connect_client(struct file *file,
                goto end;
        }
 
-       if (cl->state != MEI_FILE_CONNECTING) {
-               rets = -ENODEV;
-               goto end;
-       }
-
 
        /* prepare the output buffer */
        client = &data->out_client_properties;
@@ -543,7 +543,6 @@ static int mei_ioctl_connect_client(struct file *file,
        rets = mei_cl_connect(cl, file);
 
 end:
-       dev_dbg(&dev->pdev->dev, "free connect cb memory.");
        return rets;
 }
 
index 3adf8a70f26e7af6bf4c849b5dc7de31205cdc03..d0c6907dfd926809620e5ca459781042e32ba093 100644 (file)
@@ -142,6 +142,8 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
                mei_cl_unlink(ndev->cl_info);
                kfree(ndev->cl_info);
        }
+
+       memset(ndev, 0, sizeof(struct mei_nfc_dev));
 }
 
 static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
index a727464e9c3fb31b33f4a7b97c073e2ab4da735a..0f268329bd3aa25f6815923d83c0e2e71a782084 100644 (file)
@@ -325,6 +325,7 @@ static int mei_me_pci_resume(struct device *device)
 
        mutex_lock(&dev->device_lock);
        dev->dev_state = MEI_DEV_POWER_UP;
+       mei_clear_interrupts(dev);
        mei_reset(dev, 1);
        mutex_unlock(&dev->device_lock);
 
index 44d273c5e19d7befc7161f566b42f7e3e544ad18..0535d1e0bc78790a73c2f19c2877c9d2e078acfd 100644 (file)
@@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
                nodesperblade = 2;
        else
                nodesperblade = 1;
+       memset(&info, 0, sizeof(info));
        info.cpus = num_online_cpus();
        info.nodes = num_online_nodes();
        info.blades = info.nodes / nodesperblade;
index ea98f7e9ccd19d6ef8ea862616bcc4c0f3055b8d..39c2ecadb273d374b41be572bee6431e7b81e784 100644 (file)
@@ -4,7 +4,7 @@
 
 config VMWARE_VMCI
        tristate "VMware VMCI Driver"
-       depends on X86 && PCI && NET
+       depends on X86 && PCI
        help
          This is VMware's Virtual Machine Communication Interface.  It enables
          high-speed communication between host and guest in a virtual
index d94245dbd7651edc70e9bb23d6d9dcacbfdbd5d7..8ff2e5ee8fb8b955f8da30c8b2a2eba94ed43710 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/socket.h>
+#include <linux/uio.h>
 #include <linux/wait.h>
 #include <linux/vmalloc.h>
 
index e75774f72606487573cd3ee1817040b460bd11fb..aca59d93d5a9b8d496790935d89c5018de796d61 100644 (file)
@@ -2230,10 +2230,15 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
        mmc_free_host(slot->mmc);
 }
 
-static bool atmci_filter(struct dma_chan *chan, void *slave)
+static bool atmci_filter(struct dma_chan *chan, void *pdata)
 {
-       struct mci_dma_data     *sl = slave;
+       struct mci_platform_data *sl_pdata = pdata;
+       struct mci_dma_data *sl;
 
+       if (!sl_pdata)
+               return false;
+
+       sl = sl_pdata->dma_slave;
        if (sl && find_slave_dev(sl) == chan->device->dev) {
                chan->private = slave_data_ptr(sl);
                return true;
@@ -2245,24 +2250,18 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
 static bool atmci_configure_dma(struct atmel_mci *host)
 {
        struct mci_platform_data        *pdata;
+       dma_cap_mask_t mask;
 
        if (host == NULL)
                return false;
 
        pdata = host->pdev->dev.platform_data;
 
-       if (!pdata)
-               return false;
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
 
-       if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) {
-               dma_cap_mask_t mask;
-
-               /* Try to grab a DMA channel */
-               dma_cap_zero(mask);
-               dma_cap_set(DMA_SLAVE, mask);
-               host->dma.chan =
-                       dma_request_channel(mask, atmci_filter, pdata->dma_slave);
-       }
+       host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
+                                                         &host->pdev->dev, "rxtx");
        if (!host->dma.chan) {
                dev_warn(&host->pdev->dev, "no DMA channel available\n");
                return false;
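
dma_request_slave_channel_compat() is a transition helper: on DT platforms it first asks for the named slave channel ("rxtx" here) from the device node, and only falls back to the legacy filter-function path with platform data when that fails. A rough kernel-context sketch of that fallback behaviour; example_get_chan is illustrative and not the real dmaengine implementation:

#include <linux/dmaengine.h>

static struct dma_chan *example_get_chan(struct device *dev,
                                         dma_cap_mask_t mask,
                                         dma_filter_fn filter, void *pdata)
{
        struct dma_chan *chan;

        /* Prefer a channel described in the device tree ("rxtx" for atmel-mci) */
        chan = dma_request_slave_channel(dev, "rxtx");
        if (chan)
                return chan;

        /* Otherwise fall back to the legacy platform-data based filter */
        if (!filter || !pdata)
                return NULL;
        return dma_request_channel(mask, filter, pdata);
}
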
index 375c109607ff2020e632834cb4ffe7eca95fc2b1..f4f3038c1df08e2d5934014875abe38782f67db6 100644 (file)
@@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
+       int ret;
 
        pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                break;
        case MMC_POWER_ON:
                if (!IS_ERR(mmc->supply.vqmmc) &&
-                   !regulator_is_enabled(mmc->supply.vqmmc))
-                       regulator_enable(mmc->supply.vqmmc);
+                   !regulator_is_enabled(mmc->supply.vqmmc)) {
+                       ret = regulator_enable(mmc->supply.vqmmc);
+                       if (ret < 0)
+                               dev_err(mmc_dev(mmc),
+                                       "failed to enable vqmmc regulator\n");
+               }
 
                pwr |= MCI_PWR_ON;
                break;
index 6e44025acf01fcf9669567dff93b8c3d712b2519..eccedc7d06a4301169d24187edc1925fe4a49f87 100644 (file)
@@ -161,6 +161,7 @@ struct omap_hsmmc_host {
         */
        struct  regulator       *vcc;
        struct  regulator       *vcc_aux;
+       int                     pbias_disable;
        void    __iomem         *base;
        resource_size_t         mapbase;
        spinlock_t              irq_lock; /* Prevent races with irq handler */
@@ -255,11 +256,11 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
        if (!host->vcc)
                return 0;
        /*
-        * With DT, never turn OFF the regulator. This is because
+        * With DT, never turn OFF the regulator for MMC1. This is because
         * the pbias cell programming support is still missing when
         * booting with Device tree
         */
-       if (dev->of_node && !vdd)
+       if (host->pbias_disable && !vdd)
                return 0;
 
        if (mmc_slot(host).before_set_reg)
@@ -1520,10 +1521,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                        (ios->vdd == DUAL_VOLT_OCR_BIT) &&
                        /*
                         * With pbias cell programming missing, this
-                        * can't be allowed when booting with device
+                        * can't be allowed on MMC1 when booting with device
                         * tree.
                         */
-                       !host->dev->of_node) {
+                       !host->pbias_disable) {
                                /*
                                 * The mmc_select_voltage fn of the core does
                                 * not seem to set the power_mode to
@@ -1871,6 +1872,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 
        omap_hsmmc_context_save(host);
 
+       /* This can be removed once we support PBIAS with DT */
+       if (host->dev->of_node && host->mapbase == 0x4809c000)
+               host->pbias_disable = 1;
+
        host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
        /*
         * MMC can still work without debounce clock.
@@ -1906,33 +1911,41 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 
        omap_hsmmc_conf_bus_power(host);
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
-       if (!res) {
-               dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
-               ret = -ENXIO;
-               goto err_irq;
-       }
-       tx_req = res->start;
+       if (!pdev->dev.of_node) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+               if (!res) {
+                       dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+                       ret = -ENXIO;
+                       goto err_irq;
+               }
+               tx_req = res->start;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
-       if (!res) {
-               dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
-               ret = -ENXIO;
-               goto err_irq;
+               res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+               if (!res) {
+                       dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+                       ret = -ENXIO;
+                       goto err_irq;
+               }
+               rx_req = res->start;
        }
-       rx_req = res->start;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
-       host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+       host->rx_chan =
+               dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+                                                &rx_req, &pdev->dev, "rx");
+
        if (!host->rx_chan) {
                dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
                ret = -ENXIO;
                goto err_irq;
        }
 
-       host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+       host->tx_chan =
+               dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+                                                &tx_req, &pdev->dev, "tx");
+
        if (!host->tx_chan) {
                dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
                ret = -ENXIO;
index 7bcf74b1a5cdd8d6d2fd113fb16f540365db0a45..706d9cb1a49e1ed794cded2fbbcc033a4a892b9a 100644 (file)
@@ -87,6 +87,12 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
        .enable_dma = sdhci_acpi_enable_dma,
 };
 
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+       .caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+       .caps2   = MMC_CAP2_HC_ERASE_SZ,
+       .flags   = SDHCI_ACPI_RUNTIME_PM,
+};
+
 static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
        .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
        .caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
@@ -94,23 +100,67 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
        .pm_caps = MMC_PM_KEEP_POWER,
 };
 
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+};
+
+struct sdhci_acpi_uid_slot {
+       const char *hid;
+       const char *uid;
+       const struct sdhci_acpi_slot *slot;
+};
+
+static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+       { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+       { "80860F14" , "3" , &sdhci_acpi_slot_int_sd   },
+       { "INT33BB"  , "2" , &sdhci_acpi_slot_int_sdio },
+       { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "PNP0D40"  },
+       { },
+};
+
 static const struct acpi_device_id sdhci_acpi_ids[] = {
-       { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio },
-       { "PNP0D40" },
+       { "80860F14" },
+       { "INT33BB"  },
+       { "INT33C6"  },
+       { "PNP0D40"  },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
 
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid,
+                                                               const char *uid)
 {
-       const struct acpi_device_id *id;
-
-       for (id = sdhci_acpi_ids; id->id[0]; id++)
-               if (!strcmp(id->id, hid))
-                       return (const struct sdhci_acpi_slot *)id->driver_data;
+       const struct sdhci_acpi_uid_slot *u;
+
+       for (u = sdhci_acpi_uids; u->hid; u++) {
+               if (strcmp(u->hid, hid))
+                       continue;
+               if (!u->uid)
+                       return u->slot;
+               if (uid && !strcmp(u->uid, uid))
+                       return u->slot;
+       }
        return NULL;
 }
 
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle,
+                                                        const char *hid)
+{
+       const struct sdhci_acpi_slot *slot;
+       struct acpi_device_info *info;
+       const char *uid = NULL;
+       acpi_status status;
+
+       status = acpi_get_object_info(handle, &info);
+       if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID))
+               uid = info->unique_id.string;
+
+       slot = sdhci_acpi_get_slot_by_ids(hid, uid);
+
+       kfree(info);
+       return slot;
+}
+
 static int sdhci_acpi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -148,7 +198,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
 
        c = sdhci_priv(host);
        c->host = host;
-       c->slot = sdhci_acpi_get_slot(hid);
+       c->slot = sdhci_acpi_get_slot(handle, hid);
        c->pdev = pdev;
        c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
 
@@ -202,6 +252,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                goto err_free;
 
        if (c->use_runtime_pm) {
+               pm_runtime_set_active(dev);
                pm_suspend_ignore_children(dev, 1);
                pm_runtime_set_autosuspend_delay(dev, 50);
                pm_runtime_use_autosuspend(dev);
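
Because several Bay Trail controllers share the ACPI HID "80860F14", the slot data is now keyed on the (HID, _UID) pair, with a NULL uid entry acting as a wildcard for that HID. A small userspace sketch of the same matching rule; the slot strings and lookup values are illustrative only:

#include <stdio.h>
#include <string.h>

struct uid_slot { const char *hid; const char *uid; const char *slot; };

static const struct uid_slot uids[] = {
        { "80860F14", "1",  "emmc" },
        { "80860F14", "3",  "sd"   },
        { "INT33C6",  NULL, "sdio" },   /* NULL uid matches any _UID */
        { NULL }
};

static const char *slot_by_ids(const char *hid, const char *uid)
{
        const struct uid_slot *u;

        for (u = uids; u->hid; u++) {
                if (strcmp(u->hid, hid))
                        continue;
                if (!u->uid)
                        return u->slot;         /* wildcard entry */
                if (uid && !strcmp(u->uid, uid))
                        return u->slot;         /* exact HID + UID match */
        }
        return NULL;
}

int main(void)
{
        printf("%s\n", slot_by_ids("80860F14", "1"));   /* emmc */
        printf("%s\n", slot_by_ids("INT33C6", "7"));    /* sdio (wildcard) */
        return 0;
}
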
index 67d6dde2ff1961015304b3d3fac3fcd7040bcfd9..d5f0d59e13104957b7539f28ec743b5713f6bb2e 100644 (file)
@@ -85,6 +85,12 @@ struct pltfm_imx_data {
        struct clk *clk_ipg;
        struct clk *clk_ahb;
        struct clk *clk_per;
+       enum {
+               NO_CMD_PENDING,      /* no multiblock command pending*/
+               MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
+               WAIT_FOR_INT,        /* sent CMD12, waiting for response INT */
+       } multiblock_status;
+
 };
 
 static struct platform_device_id imx_esdhc_devtype[] = {
@@ -154,6 +160,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
 
 static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct pltfm_imx_data *imx_data = pltfm_host->priv;
        u32 val = readl(host->ioaddr + reg);
 
        if (unlikely(reg == SDHCI_CAPABILITIES)) {
@@ -175,6 +183,18 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
                        val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
                        val |= SDHCI_INT_ADMA_ERROR;
                }
+
+               /*
+                * mask off the interrupt we get in response to the manually
+                * sent CMD12
+                */
+               if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
+                   ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
+                       val &= ~SDHCI_INT_RESPONSE;
+                       writel(SDHCI_INT_RESPONSE, host->ioaddr +
+                                                  SDHCI_INT_STATUS);
+                       imx_data->multiblock_status = NO_CMD_PENDING;
+               }
        }
 
        return val;
@@ -211,6 +231,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
                        v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
                        v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
                        writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+
+                       if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS)
+                       {
+                               /* send a manual CMD12 with RESPTYP=none */
+                               data = MMC_STOP_TRANSMISSION << 24 |
+                                      SDHCI_CMD_ABORTCMD << 16;
+                               writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
+                               imx_data->multiblock_status = WAIT_FOR_INT;
+                       }
        }
 
        if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
@@ -277,11 +306,13 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                }
                return;
        case SDHCI_COMMAND:
-               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
-                    host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
-                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+               if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
                        val |= SDHCI_CMD_ABORTCMD;
 
+               if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+                       imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
+
                if (is_imx6q_usdhc(imx_data))
                        writel(val << 16,
                               host->ioaddr + SDHCI_TRANSFER_MODE);
@@ -324,8 +355,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
                /*
                 * Do not touch buswidth bits here. This is done in
                 * esdhc_pltfm_bus_width.
+                * Do not touch the D3CD bit either which is used for the
+                * SDIO interrupt errata workaround.
                 */
-               mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK;
+               mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
 
                esdhc_clrset_le(host, mask, new_val, reg);
                return;
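
The three multiblock_status values form a small state machine around the ESDHC_FLAG_MULTIBLK_NO_INT erratum: writing CMD23 (SET_BLOCK_COUNT) enters MULTIBLK_IN_PROCESS, the vendor-spec SDIO quirk path then queues a manual CMD12 and moves to WAIT_FOR_INT, and the interrupt-status read path swallows the resulting response interrupt and returns to NO_CMD_PENDING. A condensed, runnable model of those transitions; the function names are illustrative, not the driver's:

#include <stdio.h>

enum multiblock_state {
        NO_CMD_PENDING,         /* no multiblock command pending */
        MULTIBLK_IN_PROCESS,    /* CMD23-prefixed multiblock transfer running */
        WAIT_FOR_INT,           /* manual CMD12 sent, waiting for its response IRQ */
};

static enum multiblock_state state = NO_CMD_PENDING;

static void send_set_block_count(void)          /* SDHCI_COMMAND write of CMD23 */
{
        state = MULTIBLK_IN_PROCESS;
}

static void sdio_quirk_end_of_data(void)        /* ESDHC_VENDOR_SPEC write path */
{
        if (state == MULTIBLK_IN_PROCESS)
                state = WAIT_FOR_INT;           /* manual CMD12 has been queued */
}

static int int_status_read(int response_irq)    /* SDHCI_INT_STATUS read path */
{
        if (state == WAIT_FOR_INT && response_irq) {
                state = NO_CMD_PENDING;
                return 1;                       /* swallow the CMD12 response IRQ */
        }
        return 0;
}

int main(void)
{
        send_set_block_count();
        sdio_quirk_end_of_data();
        printf("swallowed=%d state=%d\n", int_status_read(1), state);
        return 0;
}
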
index 0012d3fdc999753da93389868d5f90957f5c7127..701d06d0e1fb2c63a174f0332da96a37850fd240 100644 (file)
@@ -33,6 +33,9 @@
  */
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO0  0x8809
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1  0x880a
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC   0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO   0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD     0x0f16
 
 /*
  * PCI registers
@@ -304,6 +307,33 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
        .probe_slot     = pch_hc_probe_slot,
 };
 
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+       slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
+       slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+       return 0;
+}
+
+static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+       slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+       return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+       .allow_runtime_pm = true,
+       .probe_slot     = byt_emmc_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+       .quirks2        = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+       .allow_runtime_pm = true,
+       .probe_slot     = byt_sdio_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+};
+
 /* O2Micro extra registers */
 #define O2_SD_LOCK_WP          0xD3
 #define O2_SD_MULTI_VCC3V      0xEE
@@ -855,6 +885,30 @@ static const struct pci_device_id pci_ids[] = {
                .driver_data    = (kernel_ulong_t)&sdhci_intel_pch_sdio,
        },
 
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
index a94facb46e5ca76bb239af03a32b160571ccd467..fd1df5e13ae44d77207d19fb492064166bf46525 100644 (file)
@@ -672,11 +672,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
 
        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (rc == NULL) {
-               dev_err(&pdev->dev, "No memory resource found for device!\r\n");
-               return -ENXIO;
-       }
-
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);
index 3835321b8cf38bbc86b26383d5796964a3c494a5..b45b240889f5049db48f0d90e4d7146b57985b8e 100644 (file)
@@ -25,6 +25,9 @@ menuconfig NETDEVICES
 # that for each of the symbols.
 if NETDEVICES
 
+config MII
+       tristate
+
 config NET_CORE
        default y
        bool "Network core driver support"
@@ -100,13 +103,6 @@ config NET_FC
          adaptor below. You also should have said Y to "SCSI support" and
          "SCSI generic support".
 
-config MII
-       tristate "Generic Media Independent Interface device support"
-       help
-         Most ethernet controllers have MII transceiver either as an external
-         or internal device.  It is safe to say Y or M here even if your
-         ethernet card lacks MII.
-
 config IFB
        tristate "Intermediate Functional Block support"
        depends on NET_CLS_ACT
@@ -244,6 +240,16 @@ config VIRTIO_NET
          This is the virtual network driver for virtio.  It can be used with
          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
+config NLMON
+       tristate "Virtual netlink monitoring device"
+       ---help---
+         This option enables a monitoring net device for netlink skbs. The
+         purpose of this is to analyze netlink messages with packet sockets.
+         Thus applications like tcpdump will be able to see local netlink
+         messages if they tap into the netlink device, record pcaps for further
+         diagnostics, etc. This is mostly intended for developers or support
+         to debug netlink issues. If unsure, say N.
+
 endif # NET_CORE
 
 config SUNGEM_PHY
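
The nlmon driver only provides the net device type; a monitoring interface is typically created from userspace (for example with something like "ip link add nlmon0 type nlmon" and bringing it up), after which any AF_PACKET consumer can tap it as the help text describes. A minimal capture sketch, assuming a hypothetical "nlmon0" interface and CAP_NET_RAW privileges:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <sys/socket.h>

int main(void)
{
        unsigned char buf[8192];
        struct sockaddr_ll sll;
        int fd;

        fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);
        sll.sll_ifindex = if_nametoindex("nlmon0");     /* hypothetical device name */
        if (!sll.sll_ifindex || bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
                perror("bind nlmon0");
                return 1;
        }

        for (;;) {
                ssize_t n = recv(fd, buf, sizeof(buf), 0);
                if (n < 0)
                        break;
                /* each captured frame is a raw netlink message (struct nlmsghdr ...) */
                printf("captured %zd bytes of netlink traffic\n", n);
        }
        return 0;
}
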
index ef3d090efedfdbababcd90487095c7c1e55b1d17..3fef8a81c0f69ae990579a41d9d6fd9c44aa58cf 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_NLMON) += nlmon.o
 
 #
 # Networking Drivers
index fc58d118d844e3eefd684348d9ffeac48b7dfe74..390061d096934f83c04171a1a8a62bf9d8345ec5 100644 (file)
@@ -2360,14 +2360,15 @@ int bond_3ad_set_carrier(struct bonding *bond)
 }
 
 /**
- * bond_3ad_get_active_agg_info - get information of the active aggregator
+ * __bond_3ad_get_active_agg_info - get information of the active aggregator
  * @bond: bonding struct to work on
  * @ad_info: ad_info struct to fill with the bond's info
  *
  * Returns:   0 on success
  *          < 0 on error
  */
-int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+int __bond_3ad_get_active_agg_info(struct bonding *bond,
+                                  struct ad_info *ad_info)
 {
        struct aggregator *aggregator = NULL;
        struct port *port;
@@ -2391,6 +2392,18 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
        return -1;
 }
 
+/* Wrapper used to hold bond->lock so no slave manipulation can occur */
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+{
+       int ret;
+
+       read_lock(&bond->lock);
+       ret = __bond_3ad_get_active_agg_info(bond, ad_info);
+       read_unlock(&bond->lock);
+
+       return ret;
+}
+
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 {
        struct slave *slave, *start_at;
@@ -2402,8 +2415,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        struct ad_info ad_info;
        int res = 1;
 
-       if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-               pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
+       if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+               pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
                         dev->name);
                goto out;
        }
index 0cfaa4afdecea333bd3d8831aa1e8c9e118e50b7..5d91ad0cc04142df9e73a52a62d06e5ab4d3898e 100644 (file)
@@ -273,6 +273,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave);
 void bond_3ad_adapter_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int  bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
+int  __bond_3ad_get_active_agg_info(struct bonding *bond,
+                                   struct ad_info *ad_info);
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
 int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave);
index e02cc265723abbaf397abbf84b0751d817dc5efd..4ea8ed150d469d55c741d63c09fa4fc7f7a29fa5 100644 (file)
@@ -1056,7 +1056,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
  *
  */
 
-static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
+static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 {
        u8 tmp_mac_addr[ETH_ALEN];
 
@@ -1129,6 +1129,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
 {
        int perm_curr_diff;
        int perm_bond_diff;
+       struct slave *found_slave;
 
        perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
                                                  slave->dev->dev_addr);
@@ -1136,21 +1137,12 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
                                                  bond->dev->dev_addr);
 
        if (perm_curr_diff && perm_bond_diff) {
-               struct slave *tmp_slave;
-               int i, found = 0;
-
-               bond_for_each_slave(bond, tmp_slave, i) {
-                       if (ether_addr_equal_64bits(slave->perm_hwaddr,
-                                                   tmp_slave->dev->dev_addr)) {
-                               found = 1;
-                               break;
-                       }
-               }
+               found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);
 
-               if (found) {
+               if (found_slave) {
                        /* locking: needs RTNL and nothing else */
-                       alb_swap_mac_addr(bond, slave, tmp_slave);
-                       alb_fasten_mac_swap(bond, slave, tmp_slave);
+                       alb_swap_mac_addr(slave, found_slave);
+                       alb_fasten_mac_swap(bond, slave, found_slave);
                }
        }
 }
@@ -1175,16 +1167,13 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  * @slave.
  *
  * assumption: this function is called before @slave is attached to the
- *            bond slave list.
- *
- * caller must hold the bond lock for write since the mac addresses are compared
- * and may be swapped.
+ *            bond slave list.
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-       struct slave *tmp_slave1, *tmp_slave2, *free_mac_slave;
+       struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct slave *has_bond_addr = bond->curr_active_slave;
-       int i, j, found = 0;
+       int i;
 
        if (bond->slave_cnt == 0) {
                /* this is the first slave */
@@ -1196,15 +1185,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
         * slaves in the bond.
         */
        if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
-               bond_for_each_slave(bond, tmp_slave1, i) {
-                       if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
-                                                   slave->dev->dev_addr)) {
-                               found = 1;
-                               break;
-                       }
-               }
-
-               if (!found)
+               if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
                        return 0;
 
                /* Try setting slave mac to bond address and fall-through
@@ -1215,19 +1196,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        /* The slave's address is equal to the address of the bond.
         * Search for a spare address in the bond for this slave.
         */
-       free_mac_slave = NULL;
-
        bond_for_each_slave(bond, tmp_slave1, i) {
-               found = 0;
-               bond_for_each_slave(bond, tmp_slave2, j) {
-                       if (ether_addr_equal_64bits(tmp_slave1->perm_hwaddr,
-                                                   tmp_slave2->dev->dev_addr)) {
-                               found = 1;
-                               break;
-                       }
-               }
-
-               if (!found) {
+               if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
                        /* no slave has tmp_slave1's perm addr
                         * as its curr addr
                         */
@@ -1607,15 +1577,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
                return res;
        }
 
-       /* caller must hold the bond lock for write since the mac addresses
-        * are compared and may be swapped.
-        */
-       read_lock(&bond->lock);
-
        res = alb_handle_addr_collision_on_attach(bond, slave);
-
-       read_unlock(&bond->lock);
-
        if (res) {
                return res;
        }
@@ -1698,7 +1660,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        __acquires(&bond->curr_slave_lock)
 {
        struct slave *swap_slave;
-       int i;
 
        if (bond->curr_active_slave == new_slave) {
                return;
@@ -1720,17 +1681,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        /* set the new curr_active_slave to the bonds mac address
         * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
         */
-       if (!swap_slave) {
-               struct slave *tmp_slave;
-               /* find slave that is holding the bond's mac address */
-               bond_for_each_slave(bond, tmp_slave, i) {
-                       if (ether_addr_equal_64bits(tmp_slave->dev->dev_addr,
-                                                   bond->dev->dev_addr)) {
-                               swap_slave = tmp_slave;
-                               break;
-                       }
-               }
-       }
+       if (!swap_slave)
+               swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);
 
        /*
         * Arrange for swap_slave and new_slave to temporarily be
@@ -1750,16 +1702,12 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        /* curr_active_slave must be set before calling alb_swap_mac_addr */
        if (swap_slave) {
                /* swap mac address */
-               alb_swap_mac_addr(bond, swap_slave, new_slave);
-       } else {
-               /* set the new_slave to the bond mac address */
-               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-       }
-
-       if (swap_slave) {
+               alb_swap_mac_addr(swap_slave, new_slave);
                alb_fasten_mac_swap(bond, swap_slave, new_slave);
                read_lock(&bond->lock);
        } else {
+               /* set the new_slave to the bond mac address */
+               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
                read_lock(&bond->lock);
                alb_send_learning_packets(new_slave, bond->dev->dev_addr);
        }
@@ -1776,9 +1724,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct sockaddr *sa = addr;
-       struct slave *slave, *swap_slave;
+       struct slave *swap_slave;
        int res;
-       int i;
 
        if (!is_valid_ether_addr(sa->sa_data)) {
                return -EADDRNOTAVAIL;
@@ -1799,18 +1746,10 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                return 0;
        }
 
-       swap_slave = NULL;
-
-       bond_for_each_slave(bond, slave, i) {
-               if (ether_addr_equal_64bits(slave->dev->dev_addr,
-                                           bond_dev->dev_addr)) {
-                       swap_slave = slave;
-                       break;
-               }
-       }
+       swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
 
        if (swap_slave) {
-               alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+               alb_swap_mac_addr(swap_slave, bond->curr_active_slave);
                alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
        } else {
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
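
The open-coded MAC-scanning loops are replaced by bond_slave_has_mac(), a helper added to bonding.h in the same series that returns the slave currently using a given address, or NULL. It presumably looks roughly like the kernel-context sketch below; the exact body is an assumption:

/* Hedged sketch of the helper the refactoring relies on; the authoritative
 * version lives in drivers/net/bonding/bonding.h. */
static inline struct slave *bond_slave_has_mac(struct bonding *bond,
                                               const u8 *mac)
{
        struct slave *tmp;
        int i;

        bond_for_each_slave(bond, tmp, i)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;     /* slave currently using this MAC */

        return NULL;
}
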
index d0aade04e49aff739294de120d44defaba46fbf2..142d55dc526ee0bdd4792900498b8763de177447 100644 (file)
@@ -104,6 +104,7 @@ static char *xmit_hash_policy;
 static int arp_interval = BOND_LINK_ARP_INTERV;
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_validate;
+static char *arp_all_targets;
 static char *fail_over_mac;
 static int all_slaves_active = 0;
 static struct bond_params bonding_defaults;
@@ -166,6 +167,8 @@ module_param(arp_validate, charp, 0);
 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
                               "0 for none (default), 1 for active, "
                               "2 for backup, 3 for all");
+module_param(arp_all_targets, charp, 0);
+MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
 module_param(fail_over_mac, charp, 0);
 MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
                                "the same MAC; 0 for none (default), "
@@ -216,6 +219,12 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
 {      NULL,                   -1},
 };
 
+const struct bond_parm_tbl arp_all_targets_tbl[] = {
+{      "any",                  BOND_ARP_TARGETS_ANY},
+{      "all",                  BOND_ARP_TARGETS_ALL},
+{      NULL,                   -1},
+};
+
 const struct bond_parm_tbl arp_validate_tbl[] = {
 {      "none",                 BOND_ARP_VALIDATE_NONE},
 {      "active",               BOND_ARP_VALIDATE_ACTIVE},
@@ -706,45 +715,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        return err;
 }
 
-/*
- * Add a Multicast address to slaves
- * according to mode
- */
-static void bond_mc_add(struct bonding *bond, void *addr)
-{
-       if (USES_PRIMARY(bond->params.mode)) {
-               /* write lock already acquired */
-               if (bond->curr_active_slave)
-                       dev_mc_add(bond->curr_active_slave->dev, addr);
-       } else {
-               struct slave *slave;
-               int i;
-
-               bond_for_each_slave(bond, slave, i)
-                       dev_mc_add(slave->dev, addr);
-       }
-}
-
-/*
- * Remove a multicast address from slave
- * according to mode
- */
-static void bond_mc_del(struct bonding *bond, void *addr)
-{
-       if (USES_PRIMARY(bond->params.mode)) {
-               /* write lock already acquired */
-               if (bond->curr_active_slave)
-                       dev_mc_del(bond->curr_active_slave->dev, addr);
-       } else {
-               struct slave *slave;
-               int i;
-               bond_for_each_slave(bond, slave, i) {
-                       dev_mc_del(slave->dev, addr);
-               }
-       }
-}
-
-
 static void __bond_resend_igmp_join_requests(struct net_device *dev)
 {
        struct in_device *in_dev;
@@ -764,8 +734,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
        struct net_device *bond_dev, *vlan_dev, *upper_dev;
        struct vlan_entry *vlan;
 
-       rcu_read_lock();
        read_lock(&bond->lock);
+       rcu_read_lock();
 
        bond_dev = bond->dev;
 
@@ -787,12 +757,19 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
                if (vlan_dev)
                        __bond_resend_igmp_join_requests(vlan_dev);
        }
+       rcu_read_unlock();
 
-       if (--bond->igmp_retrans > 0)
+       /* We use curr_slave_lock to protect against concurrent access to
+        * igmp_retrans from multiple running instances of this function and
+        * bond_change_active_slave
+        */
+       write_lock_bh(&bond->curr_slave_lock);
+       if (bond->igmp_retrans > 1) {
+               bond->igmp_retrans--;
                queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+       }
+       write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
-       rcu_read_unlock();
 }
 
 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -803,17 +780,15 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
        bond_resend_igmp_join_requests(bond);
 }
 
-/*
- * flush all members of flush->mc_list from device dev->mc_list
+/* Flush bond's hardware addresses from slave
  */
-static void bond_mc_list_flush(struct net_device *bond_dev,
+static void bond_hw_addr_flush(struct net_device *bond_dev,
                               struct net_device *slave_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct netdev_hw_addr *ha;
 
-       netdev_for_each_mc_addr(ha, bond_dev)
-               dev_mc_del(slave_dev, ha->addr);
+       dev_uc_unsync(slave_dev, bond_dev);
+       dev_mc_unsync(slave_dev, bond_dev);
 
        if (bond->params.mode == BOND_MODE_8023AD) {
                /* del lacpdu mc addr from mc list */
@@ -825,22 +800,14 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
 
 /*--------------------------- Active slave change ---------------------------*/
 
-/*
- * Update the mc list and multicast-related flags for the new and
- * old active slaves (if any) according to the multicast mode, and
- * promiscuous flags unconditionally.
+/* Update the hardware address list and promisc/allmulti for the new and
+ * old active slaves (if any).  Modes that are !USES_PRIMARY keep all
+ * slaves up to date at all times; only the USES_PRIMARY modes need to call
+ * this function to swap these settings during a failover.
  */
-static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
-                        struct slave *old_active)
+static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+                             struct slave *old_active)
 {
-       struct netdev_hw_addr *ha;
-
-       if (!USES_PRIMARY(bond->params.mode))
-               /* nothing to do -  mc list is already up-to-date on
-                * all slaves
-                */
-               return;
-
        if (old_active) {
                if (bond->dev->flags & IFF_PROMISC)
                        dev_set_promiscuity(old_active->dev, -1);
@@ -848,10 +815,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(old_active->dev, -1);
 
-               netif_addr_lock_bh(bond->dev);
-               netdev_for_each_mc_addr(ha, bond->dev)
-                       dev_mc_del(old_active->dev, ha->addr);
-               netif_addr_unlock_bh(bond->dev);
+               bond_hw_addr_flush(bond->dev, old_active->dev);
        }
 
        if (new_active) {
@@ -863,8 +827,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                        dev_set_allmulti(new_active->dev, 1);
 
                netif_addr_lock_bh(bond->dev);
-               netdev_for_each_mc_addr(ha, bond->dev)
-                       dev_mc_add(new_active->dev, ha->addr);
+               dev_uc_sync(new_active->dev, bond->dev);
+               dev_mc_sync(new_active->dev, bond->dev);
                netif_addr_unlock_bh(bond->dev);
        }
 }
@@ -1083,7 +1047,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
        }
 
        if (USES_PRIMARY(bond->params.mode))
-               bond_mc_swap(bond, new_active, old_active);
+               bond_hw_addr_swap(bond, new_active, old_active);
 
        if (bond_is_lb(bond)) {
                bond_alb_handle_active_change(bond, new_active);
@@ -1362,6 +1326,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                                     slave->dev->features,
                                                     mask);
        }
+       features = netdev_add_tso_features(features, mask);
 
 out:
        read_unlock(&bond->lock);
@@ -1525,10 +1490,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        struct bonding *bond = netdev_priv(bond_dev);
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct slave *new_slave = NULL;
-       struct netdev_hw_addr *ha;
        struct sockaddr addr;
        int link_reporting;
-       int res = 0;
+       int res = 0, i;
 
        if (!bond->params.use_carrier &&
            slave_dev->ethtool_ops->get_link == NULL &&
@@ -1705,10 +1669,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                        goto err_close;
        }
 
-       /* If the mode USES_PRIMARY, then the new slave gets the
-        * master's promisc (and mc) settings only if it becomes the
-        * curr_active_slave, and that is taken care of later when calling
-        * bond_change_active()
+       /* If the mode USES_PRIMARY, then the following is handled by
+        * bond_change_active_slave().
         */
        if (!USES_PRIMARY(bond->params.mode)) {
                /* set promiscuity level to new slave */
@@ -1726,9 +1688,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
 
                netif_addr_lock_bh(bond_dev);
-               /* upload master's mc_list to new slave */
-               netdev_for_each_mc_addr(ha, bond_dev)
-                       dev_mc_add(slave_dev, ha->addr);
+
+               dev_mc_sync_multiple(slave_dev, bond_dev);
+               dev_uc_sync_multiple(slave_dev, bond_dev);
+
                netif_addr_unlock_bh(bond_dev);
        }
 
@@ -1758,6 +1721,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        new_slave->last_arp_rx = jiffies -
                (msecs_to_jiffies(bond->params.arp_interval) + 1);
+       for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+               new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
 
        if (bond->params.miimon && !bond->params.use_carrier) {
                link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1907,11 +1872,9 @@ err_dest_symlinks:
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
 err_detach:
-       if (!USES_PRIMARY(bond->params.mode)) {
-               netif_addr_lock_bh(bond_dev);
-               bond_mc_list_flush(bond_dev, slave_dev);
-               netif_addr_unlock_bh(bond_dev);
-       }
+       if (!USES_PRIMARY(bond->params.mode))
+               bond_hw_addr_flush(bond_dev, slave_dev);
+
        bond_del_vlans_from_slave(bond, slave_dev);
        write_lock_bh(&bond->lock);
        bond_detach_slave(bond, new_slave);
@@ -1956,6 +1919,10 @@ err_free:
 
 err_undo_flags:
        bond_compute_features(bond);
+       /* Enslave of first slave has failed and we need to fix master's mac */
+       if (bond->slave_cnt == 0 &&
+           ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+               eth_hw_addr_random(bond_dev);
 
        return res;
 }
@@ -2106,9 +2073,8 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        bond_del_vlans_from_slave(bond, slave_dev);
 
-       /* If the mode USES_PRIMARY, then we should only remove its
-        * promisc and mc settings if it was the curr_active_slave, but that was
-        * already taken care of above when we detached the slave
+       /* If the mode USES_PRIMARY, then this case was handled above by
+        * bond_change_active_slave(..., NULL)
         */
        if (!USES_PRIMARY(bond->params.mode)) {
                /* unset promiscuity level from slave */
@@ -2119,10 +2085,7 @@ static int __bond_release_one(struct net_device *bond_dev,
                if (bond_dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(slave_dev, -1);
 
-               /* flush master's mc_list from slave */
-               netif_addr_lock_bh(bond_dev);
-               bond_mc_list_flush(bond_dev, slave_dev);
-               netif_addr_unlock_bh(bond_dev);
+               bond_hw_addr_flush(bond_dev, slave_dev);
        }
 
        bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -2555,8 +2518,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
 {
        struct sk_buff *skb;
 
-       pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
-                slave_dev->name, dest_ip, src_ip, vlan_id);
+       pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
+                slave_dev->name, &dest_ip, &src_ip, vlan_id);
 
        skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
                         NULL, slave_dev->dev_addr, NULL);
@@ -2588,7 +2551,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                __be32 addr;
                if (!targets[i])
                        break;
-               pr_debug("basa: target %x\n", targets[i]);
+               pr_debug("basa: target %pI4\n", &targets[i]);
                if (!bond_vlan_used(bond)) {
                        pr_debug("basa: empty vlan: arp_send\n");
                        addr = bond_confirm_addr(bond->dev, targets[i], 0);
@@ -2659,18 +2622,19 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
 {
        int i;
-       __be32 *targets = bond->params.arp_targets;
 
-       for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
-               pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
-                        &sip, &tip, i, &targets[i],
-                        bond_has_this_ip(bond, tip));
-               if (sip == targets[i]) {
-                       if (bond_has_this_ip(bond, tip))
-                               slave->last_arp_rx = jiffies;
-                       return;
-               }
+       if (!sip || !bond_has_this_ip(bond, tip)) {
+               pr_debug("bva: sip %pI4 tip %pI4 not found\n", &sip, &tip);
+               return;
        }
+
+       i = bond_get_targets_ip(bond->params.arp_targets, sip);
+       if (i == -1) {
+               pr_debug("bva: sip %pI4 not found in targets\n", &sip);
+               return;
+       }
+       slave->last_arp_rx = jiffies;
+       slave->target_last_arp_rx[i] = jiffies;
 }
 
 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
@@ -2685,6 +2649,10 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
                return RX_HANDLER_ANOTHER;
 
        read_lock(&bond->lock);
+
+       if (!slave_do_arp_validate(bond, slave))
+               goto out_unlock;
+
        alen = arp_hdr_len(bond->dev);
 
        pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
@@ -2724,10 +2692,17 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
         * configuration, the ARP probe will (hopefully) travel from
         * the active, through one switch, the router, then the other
         * switch before reaching the backup.
+        *
+        * We 'trust' the arp requests if there is an active slave and
+        * it received valid arp reply(s) after it became active. This
+        * is done to avoid endless looping when we can't reach the
+        * arp_ip_target and fool ourselves with our own arp requests.
         */
        if (bond_is_active_slave(slave))
                bond_validate_arp(bond, slave, sip, tip);
-       else
+       else if (bond->curr_active_slave &&
+                time_after(slave_last_rx(bond, bond->curr_active_slave),
+                           bond->curr_active_slave->jiffies))
                bond_validate_arp(bond, slave, tip, sip);
 
 out_unlock:
@@ -3276,7 +3251,7 @@ static int bond_slave_netdev_event(unsigned long event,
 static int bond_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
 {
-       struct net_device *event_dev = (struct net_device *)ptr;
+       struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
        pr_debug("event_dev: %s, event: %lx\n",
                 event_dev ? event_dev->name : "None",
@@ -3659,19 +3634,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
        return res;
 }
 
-static bool bond_addr_in_mc_list(unsigned char *addr,
-                                struct netdev_hw_addr_list *list,
-                                int addrlen)
-{
-       struct netdev_hw_addr *ha;
-
-       netdev_hw_addr_list_for_each(ha, list)
-               if (!memcmp(ha->addr, addr, addrlen))
-                       return true;
-
-       return false;
-}
-
 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
 {
        struct bonding *bond = netdev_priv(bond_dev);
@@ -3685,35 +3647,29 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
                                  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
 }
 
-static void bond_set_multicast_list(struct net_device *bond_dev)
+static void bond_set_rx_mode(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct netdev_hw_addr *ha;
-       bool found;
+       struct slave *slave;
+       int i;
 
        read_lock(&bond->lock);
 
-       /* looking for addresses to add to slaves' mc list */
-       netdev_for_each_mc_addr(ha, bond_dev) {
-               found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
-                                            bond_dev->addr_len);
-               if (!found)
-                       bond_mc_add(bond, ha->addr);
-       }
-
-       /* looking for addresses to delete from slaves' list */
-       netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
-               found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
-                                            bond_dev->addr_len);
-               if (!found)
-                       bond_mc_del(bond, ha->addr);
+       if (USES_PRIMARY(bond->params.mode)) {
+               read_lock(&bond->curr_slave_lock);
+               slave = bond->curr_active_slave;
+               if (slave) {
+                       dev_uc_sync(slave->dev, bond_dev);
+                       dev_mc_sync(slave->dev, bond_dev);
+               }
+               read_unlock(&bond->curr_slave_lock);
+       } else {
+               bond_for_each_slave(bond, slave, i) {
+                       dev_uc_sync_multiple(slave->dev, bond_dev);
+                       dev_mc_sync_multiple(slave->dev, bond_dev);
+               }
        }
 
-       /* save master's multicast list */
-       __hw_addr_flush(&bond->mc_list);
-       __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
-                              bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
-
        read_unlock(&bond->lock);
 }
 
@@ -3858,11 +3814,10 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
        pr_debug("bond=%p, name=%s\n",
                 bond, bond_dev ? bond_dev->name : "None");
 
-       /*
-        * If fail_over_mac is set to active, do nothing and return
-        * success.  Returning an error causes ifenslave to fail.
+       /* If fail_over_mac is enabled, do nothing and return success.
+        * Returning an error causes ifenslave to fail.
         */
-       if (bond->params.fail_over_mac == BOND_FOM_ACTIVE)
+       if (bond->params.fail_over_mac)
                return 0;
 
        if (!is_valid_ether_addr(sa->sa_data))
@@ -4320,7 +4275,7 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_get_stats64        = bond_get_stats,
        .ndo_do_ioctl           = bond_do_ioctl,
        .ndo_change_rx_flags    = bond_change_rx_flags,
-       .ndo_set_rx_mode        = bond_set_multicast_list,
+       .ndo_set_rx_mode        = bond_set_rx_mode,
        .ndo_change_mtu         = bond_change_mtu,
        .ndo_set_mac_address    = bond_set_mac_address,
        .ndo_neigh_setup        = bond_neigh_setup,
@@ -4425,8 +4380,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
        bond_debug_unregister(bond);
 
-       __hw_addr_flush(&bond->mc_list);
-
        list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
                list_del(&vlan->vlan_list);
                kfree(vlan);
@@ -4470,7 +4423,8 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
 
 static int bond_check_params(struct bond_params *params)
 {
-       int arp_validate_value, fail_over_mac_value, primary_reselect_value;
+       int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
+       int arp_all_targets_value;
 
        /*
         * Convert string parameters.
@@ -4650,19 +4604,22 @@ static int bond_check_params(struct bond_params *params)
                arp_interval = BOND_LINK_ARP_INTERV;
        }
 
-       for (arp_ip_count = 0;
-            (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count];
-            arp_ip_count++) {
+       for (arp_ip_count = 0, i = 0;
+            (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
                /* not complete check, but should be good enough to
                   catch mistakes */
-               __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
-               if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
-                   ip == 0 || ip == htonl(INADDR_BROADCAST)) {
+               __be32 ip = in_aton(arp_ip_target[i]);
+               if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
+                   ip == htonl(INADDR_BROADCAST)) {
                        pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
-                                  arp_ip_target[arp_ip_count]);
+                                  arp_ip_target[i]);
                        arp_interval = 0;
                } else {
-                       arp_target[arp_ip_count] = ip;
+                       if (bond_get_targets_ip(arp_target, ip) == -1)
+                               arp_target[arp_ip_count++] = ip;
+                       else
+                               pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
+                                          &ip);
                }
        }
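
The rewritten module-parameter loop above now counts only distinct, well-formed addresses: a malformed entry disables ARP monitoring altogether (arp_interval = 0), and duplicates are skipped via bond_get_targets_ip(). A userspace sketch of the same filtering, using inet_aton() in place of in_aton() and merely reporting bad entries rather than disabling monitoring:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ARP_TARGETS 16

int main(void)
{
        /* stand-ins for the arp_ip_target=... module parameter values */
        const char *arg[] = { "10.0.0.1", "10.0.0.2", "10.0.0.1", "junk" };
        uint32_t targets[MAX_ARP_TARGETS] = { 0 };
        int count = 0;

        for (size_t i = 0; i < sizeof(arg) / sizeof(arg[0]); i++) {
                struct in_addr a;
                int j, dup = 0;

                if (!isdigit((unsigned char)arg[i][0]) ||
                    inet_aton(arg[i], &a) == 0 ||
                    a.s_addr == 0 || a.s_addr == htonl(INADDR_BROADCAST)) {
                        fprintf(stderr, "bad arp_ip_target %s\n", arg[i]);
                        continue;
                }
                for (j = 0; j < count; j++)     /* duplicate check */
                        if (targets[j] == a.s_addr)
                                dup = 1;
                if (dup) {
                        fprintf(stderr, "duplicate %s, skipping\n", arg[i]);
                        continue;
                }
                if (count < MAX_ARP_TARGETS)
                        targets[count++] = a.s_addr;
        }

        printf("%d distinct target(s) accepted\n", count);
        return 0;
}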
 
@@ -4693,11 +4650,21 @@ static int bond_check_params(struct bond_params *params)
        } else
                arp_validate_value = 0;
 
+       arp_all_targets_value = 0;
+       if (arp_all_targets) {
+               arp_all_targets_value = bond_parse_parm(arp_all_targets,
+                                                       arp_all_targets_tbl);
+
+               if (arp_all_targets_value == -1) {
+                       pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
+                              arp_all_targets);
+                       arp_all_targets_value = 0;
+               }
+       }
+
        if (miimon) {
                pr_info("MII link monitoring set to %d ms\n", miimon);
        } else if (arp_interval) {
-               int i;
-
                pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
                        arp_interval,
                        arp_validate_tbl[arp_validate_value].modename,
@@ -4759,6 +4726,7 @@ static int bond_check_params(struct bond_params *params)
        params->num_peer_notif = num_peer_notif;
        params->arp_interval = arp_interval;
        params->arp_validate = arp_validate_value;
+       params->arp_all_targets = arp_all_targets_value;
        params->updelay = updelay;
        params->downdelay = downdelay;
        params->use_carrier = use_carrier;
@@ -4840,7 +4808,6 @@ static int bond_init(struct net_device *bond_dev)
                bond->dev_addr_from_first = true;
        }
 
-       __hw_addr_init(&bond->mc_list);
        return 0;
 }
 
@@ -4913,7 +4880,7 @@ static int __net_init bond_net_init(struct net *net)
 
        bond_create_proc_dir(bn);
        bond_create_sysfs(bn);
-       
+
        return 0;
 }
 
index 94d06f1307b850f927c85b4b7645f49859b4772b..4060d41f0ee7b15bc228b6bda9488a9e6cc9c821 100644 (file)
@@ -130,7 +130,7 @@ static void bond_info_show_master(struct seq_file *seq)
                seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
                           ad_select_tbl[bond->params.ad_select].modename);
 
-               if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+               if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
                        seq_printf(seq, "bond %s has no active aggregator\n",
                                   bond->dev->name);
                } else {
index ea7a388f484306710a33375f3a553fd1ecc5b621..dc36a3d7d9e983a15583260a572e6dac7451acb5 100644 (file)
@@ -231,8 +231,7 @@ static ssize_t bonding_show_slaves(struct device *d,
 }
 
 /*
- * Set the slaves in the current bond.  The bond interface must be
- * up for this to succeed.
+ * Set the slaves in the current bond.
  * This is supposed to be only thin wrapper for bond_enslave and bond_release.
  * All hard work should be done there.
  */
@@ -316,6 +315,9 @@ static ssize_t bonding_store_mode(struct device *d,
        int new_value, ret = count;
        struct bonding *bond = to_bond(d);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (bond->dev->flags & IFF_UP) {
                pr_err("unable to update mode of %s because interface is up.\n",
                       bond->dev->name);
@@ -352,6 +354,7 @@ static ssize_t bonding_store_mode(struct device *d,
                bond->dev->name, bond_mode_tbl[new_value].modename,
                new_value);
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
@@ -359,7 +362,6 @@ static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
 
 /*
  * Show and set the bonding transmit hash method.
- * The bond interface must be down to change the xmit hash policy.
  */
 static ssize_t bonding_show_xmit_hash(struct device *d,
                                      struct device_attribute *attr,
@@ -379,20 +381,12 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
        int new_value, ret = count;
        struct bonding *bond = to_bond(d);
 
-       if (bond->dev->flags & IFF_UP) {
-               pr_err("%s: Interface is up. Unable to update xmit policy.\n",
-                      bond->dev->name);
-               ret = -EPERM;
-               goto out;
-       }
-
        new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
        if (new_value < 0)  {
                pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
                       bond->dev->name,
                       (int)strlen(buf) - 1, buf);
                ret = -EINVAL;
-               goto out;
        } else {
                bond->params.xmit_policy = new_value;
                bond_set_mode_ops(bond, bond->params.mode);
@@ -400,7 +394,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
                        bond->dev->name,
                        xmit_hashtype_tbl[new_value].modename, new_value);
        }
-out:
+
        return ret;
 }
 static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
@@ -449,6 +443,44 @@ static ssize_t bonding_store_arp_validate(struct device *d,
 
 static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
                   bonding_store_arp_validate);
+/*
+ * Show and set arp_all_targets.
+ */
+static ssize_t bonding_show_arp_all_targets(struct device *d,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct bonding *bond = to_bond(d);
+       int value = bond->params.arp_all_targets;
+
+       return sprintf(buf, "%s %d\n", arp_all_targets_tbl[value].modename,
+                      value);
+}
+
+static ssize_t bonding_store_arp_all_targets(struct device *d,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct bonding *bond = to_bond(d);
+       int new_value;
+
+       new_value = bond_parse_parm(buf, arp_all_targets_tbl);
+       if (new_value < 0) {
+               pr_err("%s: Ignoring invalid arp_all_targets value %s\n",
+                      bond->dev->name, buf);
+               return -EINVAL;
+       }
+       pr_info("%s: setting arp_all_targets to %s (%d).\n",
+               bond->dev->name, arp_all_targets_tbl[new_value].modename,
+               new_value);
+
+       bond->params.arp_all_targets = new_value;
+
+       return count;
+}
+
+static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
+                  bonding_show_arp_all_targets, bonding_store_arp_all_targets);
 
 /*
  * Show and store fail_over_mac.  User only allowed to change the
@@ -596,10 +628,11 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
 {
-       __be32 newtarget;
-       int i = 0, done = 0, ret = count;
        struct bonding *bond = to_bond(d);
-       __be32 *targets;
+       struct slave *slave;
+       __be32 newtarget, *targets;
+       unsigned long *targets_rx;
+       int ind, i, j, ret = -EINVAL;
 
        targets = bond->params.arp_targets;
        newtarget = in_aton(buf + 1);
@@ -608,57 +641,63 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
                        pr_err("%s: invalid ARP target %pI4 specified for addition\n",
                               bond->dev->name, &newtarget);
-                       ret = -EINVAL;
                        goto out;
                }
-               /* look for an empty slot to put the target in, and check for dupes */
-               for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
-                       if (targets[i] == newtarget) { /* duplicate */
-                               pr_err("%s: ARP target %pI4 is already present\n",
-                                      bond->dev->name, &newtarget);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       if (targets[i] == 0) {
-                               pr_info("%s: adding ARP target %pI4.\n",
-                                       bond->dev->name, &newtarget);
-                               done = 1;
-                               targets[i] = newtarget;
-                       }
+
+               if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
+                       pr_err("%s: ARP target %pI4 is already present\n",
+                              bond->dev->name, &newtarget);
+                       goto out;
                }
-               if (!done) {
+
+               ind = bond_get_targets_ip(targets, 0); /* first free slot */
+               if (ind == -1) {
                        pr_err("%s: ARP target table is full!\n",
                               bond->dev->name);
-                       ret = -EINVAL;
                        goto out;
                }
 
+               pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
+                        &newtarget);
+               /* not to race with bond_arp_rcv */
+               write_lock_bh(&bond->lock);
+               bond_for_each_slave(bond, slave, i)
+                       slave->target_last_arp_rx[ind] = jiffies;
+               targets[ind] = newtarget;
+               write_unlock_bh(&bond->lock);
        } else if (buf[0] == '-')       {
                if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
                        pr_err("%s: invalid ARP target %pI4 specified for removal\n",
                               bond->dev->name, &newtarget);
-                       ret = -EINVAL;
                        goto out;
                }
 
-               for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
-                       if (targets[i] == newtarget) {
-                               int j;
-                               pr_info("%s: removing ARP target %pI4.\n",
-                                       bond->dev->name, &newtarget);
-                               for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
-                                       targets[j] = targets[j+1];
-
-                               targets[j] = 0;
-                               done = 1;
-                       }
-               }
-               if (!done) {
-                       pr_info("%s: unable to remove nonexistent ARP target %pI4.\n",
+               ind = bond_get_targets_ip(targets, newtarget);
+               if (ind == -1) {
+                       pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
                                bond->dev->name, &newtarget);
-                       ret = -EINVAL;
                        goto out;
                }
+
+               if (ind == 0 && !targets[1] && bond->params.arp_interval)
+                       pr_warn("%s: removing last arp target with arp_interval on\n",
+                               bond->dev->name);
+
+               pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
+                       &newtarget);
+
+               write_lock_bh(&bond->lock);
+               bond_for_each_slave(bond, slave, i) {
+                       targets_rx = slave->target_last_arp_rx;
+                       j = ind;
+                       for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
+                               targets_rx[j] = targets_rx[j+1];
+                       targets_rx[j] = 0;
+               }
+               for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+                       targets[i] = targets[i+1];
+               targets[i] = 0;
+               write_unlock_bh(&bond->lock);
        } else {
                pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
                       bond->dev->name);
@@ -666,6 +705,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                goto out;
        }
 
+       ret = count;
 out:
        return ret;
 }
@@ -1315,7 +1355,6 @@ static ssize_t bonding_show_mii_status(struct device *d,
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
 
-
 /*
  * Show current 802.3ad aggregator ID.
  */
@@ -1329,7 +1368,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.aggregator_id);
        }
 
@@ -1351,7 +1390,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.ports);
        }
 
@@ -1373,7 +1412,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.actor_key);
        }
 
@@ -1395,7 +1434,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.partner_key);
        }
 
@@ -1642,6 +1681,7 @@ static struct attribute *per_bond_attrs[] = {
        &dev_attr_mode.attr,
        &dev_attr_fail_over_mac.attr,
        &dev_attr_arp_validate.attr,
+       &dev_attr_arp_all_targets.attr,
        &dev_attr_arp_interval.attr,
        &dev_attr_arp_ip_target.attr,
        &dev_attr_downdelay.attr,
index 2baec24388b1b2bc1370d3f8aceb229226cc770c..3fb73cc8c34a877d72c6997675bb6c633becd689 100644 (file)
@@ -144,6 +144,7 @@ struct bond_params {
        u8 num_peer_notif;
        int arp_interval;
        int arp_validate;
+       int arp_all_targets;
        int use_carrier;
        int fail_over_mac;
        int updelay;
@@ -179,6 +180,7 @@ struct slave {
        int    delay;
        unsigned long jiffies;
        unsigned long last_arp_rx;
+       unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
        s8     link;    /* one of BOND_LINK_XXXX */
        s8     new_link;
        u8     backup:1,   /* indicates backup slave. Value corresponds with
@@ -225,13 +227,12 @@ struct bonding {
        rwlock_t curr_slave_lock;
        u8       send_peer_notif;
        s8       setup_by_slave;
-       s8       igmp_retrans;
+       u8       igmp_retrans;
 #ifdef CONFIG_PROC_FS
        struct   proc_dir_entry *proc_entry;
        char     proc_file_name[IFNAMSIZ];
 #endif /* CONFIG_PROC_FS */
        struct   list_head bond_list;
-       struct   netdev_hw_addr_list mc_list;
        int      (*xmit_hash_policy)(struct sk_buff *, int);
        u16      rr_tx_counter;
        struct   ad_bond_info ad_info;
@@ -323,6 +324,9 @@ static inline bool bond_is_active_slave(struct slave *slave)
 #define BOND_FOM_ACTIVE                        1
 #define BOND_FOM_FOLLOW                        2
 
+#define BOND_ARP_TARGETS_ANY           0
+#define BOND_ARP_TARGETS_ALL           1
+
 #define BOND_ARP_VALIDATE_NONE         0
 #define BOND_ARP_VALIDATE_ACTIVE       (1 << BOND_STATE_ACTIVE)
 #define BOND_ARP_VALIDATE_BACKUP       (1 << BOND_STATE_BACKUP)
@@ -335,11 +339,31 @@ static inline int slave_do_arp_validate(struct bonding *bond,
        return bond->params.arp_validate & (1 << bond_slave_state(slave));
 }
 
+/* Get the oldest arp which we've received on this slave for bond's
+ * arp_targets.
+ */
+static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
+                                                      struct slave *slave)
+{
+       int i = 1;
+       unsigned long ret = slave->target_last_arp_rx[0];
+
+       for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
+               if (time_before(slave->target_last_arp_rx[i], ret))
+                       ret = slave->target_last_arp_rx[i];
+
+       return ret;
+}
+
 static inline unsigned long slave_last_rx(struct bonding *bond,
                                        struct slave *slave)
 {
-       if (slave_do_arp_validate(bond, slave))
-               return slave->last_arp_rx;
+       if (slave_do_arp_validate(bond, slave)) {
+               if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+                       return slave_oldest_target_arp_rx(bond, slave);
+               else
+                       return slave->last_arp_rx;
+       }
 
        return slave->dev->last_rx;
 }
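
The new arp_all_targets option changes what "recently seen" means for a slave when arp_validate is on: with the default (any), one validated reply refreshes the slave; with all, slave_last_rx() reports the oldest per-target stamp via slave_oldest_target_arp_rx() above, so the slave only stays up while every configured target keeps answering. A plain-counter analogue of that selection (jiffies and time_before() replaced by 32-bit stand-ins):

#include <stdint.h>
#include <stdio.h>

#define MAX_ARP_TARGETS 16

/* "a is before b" on a wrapping counter, analogous to time_before() */
static int before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

static uint32_t oldest_target_rx(const uint32_t *stamps,
                                 const uint32_t *targets)
{
        uint32_t ret = stamps[0];
        int i;

        for (i = 1; i < MAX_ARP_TARGETS && targets[i]; i++)
                if (before(stamps[i], ret))
                        ret = stamps[i];
        return ret;
}

int main(void)
{
        uint32_t targets[MAX_ARP_TARGETS] = { 1, 2, 3, 0 };      /* three targets */
        uint32_t stamps[MAX_ARP_TARGETS]  = { 1000, 400, 990 };  /* last replies  */

        /* 400 is reported: the second target is the stalest one */
        printf("oldest reply stamp: %u\n", oldest_target_rx(stamps, targets));
        return 0;
}
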
@@ -465,12 +489,29 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
        return NULL;
 }
 
+/* Check if the ip is present in arp ip list, or first free slot if ip == 0
+ * Returns -1 if not found, index if found
+ */
+static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
+{
+       int i;
+
+       for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+               if (targets[i] == ip)
+                       return i;
+               else if (targets[i] == 0)
+                       break;
+
+       return -1;
+}
+
 /* exported from bond_main.c */
 extern int bond_net_id;
 extern const struct bond_parm_tbl bond_lacp_tbl[];
 extern const struct bond_parm_tbl bond_mode_tbl[];
 extern const struct bond_parm_tbl xmit_hashtype_tbl[];
 extern const struct bond_parm_tbl arp_validate_tbl[];
+extern const struct bond_parm_tbl arp_all_targets_tbl[];
 extern const struct bond_parm_tbl fail_over_mac_tbl[];
 extern const struct bond_parm_tbl pri_reselect_tbl[];
 extern struct bond_parm_tbl ad_select_tbl[];
index 7ffc756131a214a7b896f81d05e9c499c12cf752..547098086773cd06a936a4010bb8b66ffc933dfd 100644 (file)
@@ -43,7 +43,7 @@ config CAIF_HSI
 
 config CAIF_VIRTIO
        tristate "CAIF virtio transport driver"
-       depends on CAIF
+       depends on CAIF && HAS_DMA
        select VHOST_RING
        select VIRTIO
        select GENERIC_ALLOCATOR
index e456b70933c230abd7703ca3f6abed853679e5ef..3c069472eb8b6ddfc4f134209ccf92b51ac0ee84 100644 (file)
@@ -102,12 +102,9 @@ config CAN_JANZ_ICAN3
          This driver can also be built as a module. If so, the module will be
          called janz-ican3.ko.
 
-config HAVE_CAN_FLEXCAN
-       bool
-
 config CAN_FLEXCAN
        tristate "Support for Freescale FLEXCAN based chips"
-       depends on HAVE_CAN_FLEXCAN
+       depends on ARM || PPC
        ---help---
          Say Y here if you want to support for Freescale FlexCAN.
 
index db52f4414def171a4b76fffad88231589ddcc5c5..ce8421ac453a4d5a0252b1b0e6b4d87e6acb6f91 100644 (file)
@@ -1220,7 +1220,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
                goto out;
        }
 
-       err = strict_strtoul(buf, 0, &can_id);
+       err = kstrtoul(buf, 0, &can_id);
        if (err) {
                ret = err;
                goto out;
@@ -1393,8 +1393,6 @@ static int at91_can_remove(struct platform_device *pdev)
 
        unregister_netdev(dev);
 
-       platform_set_drvdata(pdev, NULL);
-
        iounmap(priv->reg_base);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index d4a15e82bfc0954860999e5831a043ff53b07d9a..a2700d25ff0ed87fb800d4c157aeef911234d26e 100644 (file)
@@ -580,7 +580,7 @@ static int bfin_can_probe(struct platform_device *pdev)
        priv->pin_list = pdata;
        priv->can.clock.freq = get_sclk();
 
-       dev_set_drvdata(&pdev->dev, dev);
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        dev->flags |= IFF_ECHO; /* we support local echo */
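
A pattern that repeats through the CAN platform-driver hunks in this series (at91, bfin, cc770, mscan, sja1000, ...): open-coded dev_set_drvdata(&pdev->dev, ...) / dev_get_drvdata(&pdev->dev) calls become platform_set_drvdata() / platform_get_drvdata(), and the explicit platform_set_drvdata(pdev, NULL) in the remove paths disappears, since the driver core clears drvdata itself once no driver is bound. The accessors are thin wrappers over the embedded struct device, roughly:

/* roughly the <linux/platform_device.h> helpers being adopted here */
static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
        return dev_get_drvdata(&pdev->dev);
}

static inline void platform_set_drvdata(struct platform_device *pdev,
                                        void *data)
{
        dev_set_drvdata(&pdev->dev, data);
}
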
@@ -613,7 +613,7 @@ exit:
 
 static int bfin_can_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct resource *res;
 
@@ -621,8 +621,6 @@ static int bfin_can_remove(struct platform_device *pdev)
 
        unregister_candev(dev);
 
-       dev_set_drvdata(&pdev->dev, NULL);
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
 
@@ -635,7 +633,7 @@ static int bfin_can_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        int timeout = BFIN_CAN_TIMEOUT;
@@ -658,7 +656,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
 
 static int bfin_can_resume(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
 
index d63b91904f829e9954596b06c0e34f48e3d1ba3b..b918c73294265fdcf56737d20c6761cefd82874e 100644 (file)
@@ -201,8 +201,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
                        priv->instance = pdev->id;
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res);
-               if (!priv->raminit_ctrlreg || priv->instance < 0)
+               priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
                        priv->raminit = c_can_hw_raminit;
@@ -234,7 +234,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
        return 0;
 
 exit_free_device:
-       platform_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 exit_iounmap:
        iounmap(addr);
@@ -255,7 +254,6 @@ static int c_can_plat_remove(struct platform_device *pdev)
        struct resource *mem;
 
        unregister_c_can_dev(dev);
-       platform_set_drvdata(pdev, NULL);
 
        free_c_can_dev(dev);
        iounmap(priv->base);
index 8eaaac81f320a70040c6af68f6acabec51316134..87a47c0cfd49185b1c5f9198a5945a3e9a51161a 100644 (file)
@@ -265,7 +265,7 @@ static int cc770_isa_probe(struct platform_device *pdev)
        else
                priv->clkout = COR_DEFAULT;
 
-       dev_set_drvdata(&pdev->dev, dev);
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = register_cc770dev(dev);
@@ -293,12 +293,11 @@ static int cc770_isa_probe(struct platform_device *pdev)
 
 static int cc770_isa_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct cc770_priv *priv = netdev_priv(dev);
        int idx = pdev->id;
 
        unregister_cc770dev(dev);
-       dev_set_drvdata(&pdev->dev, NULL);
 
        if (mem[idx]) {
                iounmap(priv->reg_base);
index d0f6bfc45aea1fae0a824f65cb431f847e2ae578..034bdd816a60c74104b00b5b203c69f120f0555d 100644 (file)
@@ -216,7 +216,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
                 priv->reg_base, dev->irq, priv->can.clock.freq,
                 priv->cpu_interface, priv->bus_config, priv->clkout);
 
-       dev_set_drvdata(&pdev->dev, dev);
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = register_cc770dev(dev);
@@ -240,7 +240,7 @@ exit_release_mem:
 
 static int cc770_platform_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct cc770_priv *priv = netdev_priv(dev);
        struct resource *mem;
 
index 769d29ed106dbb1336745510b7fa2ff60f387bf7..f873b9f8d4d4b915411e7963394a52e37f67b47a 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/pinctrl/consumer.h>
 
 #define DRV_NAME                       "flexcan"
 
@@ -1004,16 +1003,11 @@ static int flexcan_probe(struct platform_device *pdev)
        struct flexcan_priv *priv;
        struct resource *mem;
        struct clk *clk_ipg = NULL, *clk_per = NULL;
-       struct pinctrl *pinctrl;
        void __iomem *base;
        resource_size_t mem_size;
        int err, irq;
        u32 clock_freq = 0;
 
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl))
-               return PTR_ERR(pinctrl);
-
        if (pdev->dev.of_node)
                of_property_read_u32(pdev->dev.of_node,
                                                "clock-frequency", &clock_freq);
@@ -1127,7 +1121,6 @@ static int flexcan_remove(struct platform_device *pdev)
        struct resource *mem;
 
        unregister_flexcandev(dev);
-       platform_set_drvdata(pdev, NULL);
        iounmap(priv->base);
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1138,10 +1131,10 @@ static int flexcan_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int flexcan_suspend(struct device *device)
 {
-       struct net_device *dev = platform_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
 
        flexcan_chip_disable(priv);
@@ -1155,9 +1148,9 @@ static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
        return 0;
 }
 
-static int flexcan_resume(struct platform_device *pdev)
+static int flexcan_resume(struct device *device)
 {
-       struct net_device *dev = platform_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1169,21 +1162,19 @@ static int flexcan_resume(struct platform_device *pdev)
 
        return 0;
 }
-#else
-#define flexcan_suspend NULL
-#define flexcan_resume NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
 
 static struct platform_driver flexcan_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
+               .pm = &flexcan_pm_ops,
                .of_match_table = flexcan_of_match,
        },
        .probe = flexcan_probe,
        .remove = flexcan_remove,
-       .suspend = flexcan_suspend,
-       .resume = flexcan_resume,
        .id_table = flexcan_id_table,
 };
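
The flexcan hunk above is a conversion from the legacy platform_driver .suspend/.resume callbacks to dev_pm_ops: the callbacks now take a struct device, SIMPLE_DEV_PM_OPS() wires them into the system-sleep slots, and guarding them with CONFIG_PM_SLEEP keeps them from being built when sleep support is off. A bare-bones sketch of the same shape for a hypothetical driver (names are invented):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
        /* quiesce the hardware here */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* re-initialise the hardware here */
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name  = "foo",
                .owner = THIS_MODULE,
                .pm    = &foo_pm_ops,
        },
        /* .probe / .remove omitted for brevity */
};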
 
index 17fbc7a092247b556caf31c0c4adfa0a05b7f6b2..6aa737a243931f2db7f18842e6d5386ab4f812ae 100644 (file)
@@ -1646,7 +1646,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
        if (err)
                goto exit_free_candev;
 
-       dev_set_drvdata(&ofdev->dev, dev);
+       platform_set_drvdata(ofdev, dev);
 
        /* Reset device to allow bit-timing to be set. No need to call
         * grcan_reset at this stage. That is done in grcan_open.
@@ -1683,10 +1683,9 @@ static int grcan_probe(struct platform_device *ofdev)
        }
 
        res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-       base = devm_request_and_ioremap(&ofdev->dev, res);
-       if (!base) {
-               dev_err(&ofdev->dev, "couldn't map IO resource\n");
-               err = -EADDRNOTAVAIL;
+       base = devm_ioremap_resource(&ofdev->dev, res);
+       if (IS_ERR(base)) {
+               err = PTR_ERR(base);
                goto exit_error;
        }
 
@@ -1716,13 +1715,12 @@ exit_error:
 
 static int grcan_remove(struct platform_device *ofdev)
 {
-       struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *dev = platform_get_drvdata(ofdev);
        struct grcan_priv *priv = netdev_priv(dev);
 
        unregister_candev(dev); /* Will in turn call grcan_close */
 
        irq_dispose_mapping(dev->irq);
-       dev_set_drvdata(&ofdev->dev, NULL);
        netif_napi_del(&priv->napi);
        free_candev(dev);
 
index c4bc1d2e2033214db1a20ff584d184f35d8635ec..36bd6fa1c7f3e4760b5f69f65b420f8d55b66a5f 100644 (file)
@@ -1734,7 +1734,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
        unsigned long enable;
        int ret;
 
-       if (strict_strtoul(buf, 0, &enable))
+       if (kstrtoul(buf, 0, &enable))
                return -EINVAL;
 
        ret = ican3_set_termination(mod, enable);
index f27fca65dc4a3590f99e3e55428326c19e937425..a3d99a8fd2d19ee46f846448d2707f8dadf3e4af 100644 (file)
@@ -88,9 +88,9 @@ EXPORT_SYMBOL_GPL(devm_can_led_init);
 
 /* NETDEV rename notifier to rename the associated led triggers too */
 static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
-                       void *data)
+                           void *ptr)
 {
-       struct net_device *netdev = data;
+       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct can_priv *priv = safe_candev_priv(netdev);
        char name[CAN_LED_NAME_SZ];
 
index 668850e441dcd7e3c13e901efc2cc1e0f3473f53..5b0ee8ef5885e54a610030b9318445bae8aaf321 100644 (file)
@@ -302,7 +302,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
                goto exit_free_mscan;
        }
 
-       dev_set_drvdata(&ofdev->dev, dev);
+       platform_set_drvdata(ofdev, dev);
 
        dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
                 priv->reg_base, dev->irq, priv->can.clock.freq);
@@ -321,11 +321,9 @@ exit_unmap_mem:
 
 static int mpc5xxx_can_remove(struct platform_device *ofdev)
 {
-       struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *dev = platform_get_drvdata(ofdev);
        struct mscan_priv *priv = netdev_priv(dev);
 
-       dev_set_drvdata(&ofdev->dev, NULL);
-
        unregister_mscandev(dev);
        iounmap(priv->reg_base);
        irq_dispose_mapping(dev->irq);
@@ -338,7 +336,7 @@ static int mpc5xxx_can_remove(struct platform_device *ofdev)
 static struct mscan_regs saved_regs;
 static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state)
 {
-       struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *dev = platform_get_drvdata(ofdev);
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
 
@@ -349,7 +347,7 @@ static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state
 
 static int mpc5xxx_can_resume(struct platform_device *ofdev)
 {
-       struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *dev = platform_get_drvdata(ofdev);
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
 
index 5c8da46614892504c1a30972f3199ffd7309b68a..06a282397fff076816818ba7c591ad78f528e7bf 100644 (file)
@@ -197,7 +197,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
        else
                priv->cdr = CDR_DEFAULT;
 
-       dev_set_drvdata(&pdev->dev, dev);
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = register_sja1000dev(dev);
@@ -225,12 +225,11 @@ static int sja1000_isa_probe(struct platform_device *pdev)
 
 static int sja1000_isa_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct sja1000_priv *priv = netdev_priv(dev);
        int idx = pdev->id;
 
        unregister_sja1000dev(dev);
-       dev_set_drvdata(&pdev->dev, NULL);
 
        if (mem[idx]) {
                iounmap(priv->reg_base);
index 8e0c4a0019397f61af74d44da1b60ca0c61032d2..31ad33911167058f06d1690ee62ca7b38e3eef1e 100644 (file)
@@ -72,13 +72,11 @@ static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
 
 static int sja1000_ofp_remove(struct platform_device *ofdev)
 {
-       struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *dev = platform_get_drvdata(ofdev);
        struct sja1000_priv *priv = netdev_priv(dev);
        struct device_node *np = ofdev->dev.of_node;
        struct resource res;
 
-       dev_set_drvdata(&ofdev->dev, NULL);
-
        unregister_sja1000dev(dev);
        free_sja1000dev(dev);
        iounmap(priv->reg_base);
@@ -181,7 +179,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
                 priv->reg_base, dev->irq, priv->can.clock.freq,
                 priv->ocr, priv->cdr);
 
-       dev_set_drvdata(&ofdev->dev, dev);
+       platform_set_drvdata(ofdev, dev);
        SET_NETDEV_DEV(dev, &ofdev->dev);
 
        err = register_sja1000dev(dev);
index 21619bb5b869282a2d0a2f20090d93ea35908282..8e259c541036c575fc181796ebfe21f0c8798a1d 100644 (file)
@@ -135,7 +135,7 @@ static int sp_probe(struct platform_device *pdev)
                break;
        }
 
-       dev_set_drvdata(&pdev->dev, dev);
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = register_sja1000dev(dev);
@@ -161,12 +161,11 @@ static int sp_probe(struct platform_device *pdev)
 
 static int sp_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
        struct sja1000_priv *priv = netdev_priv(dev);
        struct resource *res;
 
        unregister_sja1000dev(dev);
-       dev_set_drvdata(&pdev->dev, NULL);
 
        if (priv->reg_base)
                iounmap(priv->reg_base);
index 06b7e097d36e5ed3117e9d1dc8258ee74d51c6be..874188ba06f7172fed36b93db05be06ae0e2bd59 100644 (file)
@@ -161,7 +161,7 @@ static void slc_bump(struct slcan *sl)
 
        sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
 
-       if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
+       if (kstrtoul(sl->rbuff+1, 16, &ultmp))
                return;
 
        cf.can_id = ultmp;
index 3a2b45601ec29d69f07ec4dab98425ebb09add15..65eef1eea2e2434ca47cfeb086095a415d2b385e 100644 (file)
@@ -594,7 +594,7 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        val &= 0xFF;
index f21fc37ec578d3926a09cda2706e2be610e593d8..3a349a22d5bc46eed31bdc32e12d27c25df3bd13 100644 (file)
@@ -1001,7 +1001,6 @@ static int ti_hecc_remove(struct platform_device *pdev)
        iounmap(priv->base);
        release_mem_region(res->start, resource_size(res));
        free_candev(ndev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 9b74d1e3ad44a51c950257b6588bf082247a75d4..6aa7b3266c80904d8d2f2106869085a8b19d5248 100644 (file)
@@ -612,9 +612,15 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
 {
        struct esd_usb2 *dev = priv->usb2;
        struct net_device *netdev = priv->netdev;
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
        int err, i;
 
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        /*
         * Enable all IDs
         * The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
@@ -628,33 +634,32 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
         * the number of the starting bitmask (0..64) to the filter.option
         * field followed by only some bitmasks.
         */
-       msg.msg.hdr.cmd = CMD_IDADD;
-       msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
-       msg.msg.filter.net = priv->index;
-       msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+       msg->msg.hdr.cmd = CMD_IDADD;
+       msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+       msg->msg.filter.net = priv->index;
+       msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
        for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
-               msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff);
+               msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff);
        /* enable 29bit extended IDs */
-       msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
+       msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
 
-       err = esd_usb2_send_msg(dev, &msg);
+       err = esd_usb2_send_msg(dev, msg);
        if (err)
-               goto failed;
+               goto out;
 
        err = esd_usb2_setup_rx_urbs(dev);
        if (err)
-               goto failed;
+               goto out;
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-       return 0;
-
-failed:
+out:
        if (err == -ENODEV)
                netif_device_detach(netdev);
+       if (err)
+               netdev_err(netdev, "couldn't start device: %d\n", err);
 
-       netdev_err(netdev, "couldn't start device: %d\n", err);
-
+       kfree(msg);
        return err;
 }
 
@@ -833,26 +838,30 @@ nourbmem:
 static int esd_usb2_close(struct net_device *netdev)
 {
        struct esd_usb2_net_priv *priv = netdev_priv(netdev);
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
        int i;
 
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
        /* Disable all IDs (see esd_usb2_start()) */
-       msg.msg.hdr.cmd = CMD_IDADD;
-       msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
-       msg.msg.filter.net = priv->index;
-       msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+       msg->msg.hdr.cmd = CMD_IDADD;
+       msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+       msg->msg.filter.net = priv->index;
+       msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
        for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
-               msg.msg.filter.mask[i] = 0;
-       if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+               msg->msg.filter.mask[i] = 0;
+       if (esd_usb2_send_msg(priv->usb2, msg) < 0)
                netdev_err(netdev, "sending idadd message failed\n");
 
        /* set CAN controller to reset mode */
-       msg.msg.hdr.len = 2;
-       msg.msg.hdr.cmd = CMD_SETBAUD;
-       msg.msg.setbaud.net = priv->index;
-       msg.msg.setbaud.rsvd = 0;
-       msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
-       if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+       msg->msg.hdr.len = 2;
+       msg->msg.hdr.cmd = CMD_SETBAUD;
+       msg->msg.setbaud.net = priv->index;
+       msg->msg.setbaud.rsvd = 0;
+       msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
+       if (esd_usb2_send_msg(priv->usb2, msg) < 0)
                netdev_err(netdev, "sending setbaud message failed\n");
 
        priv->can.state = CAN_STATE_STOPPED;
@@ -861,6 +870,8 @@ static int esd_usb2_close(struct net_device *netdev)
 
        close_candev(netdev);
 
+       kfree(msg);
+
        return 0;
 }
 
@@ -886,7 +897,8 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
 {
        struct esd_usb2_net_priv *priv = netdev_priv(netdev);
        struct can_bittiming *bt = &priv->can.bittiming;
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
+       int err;
        u32 canbtr;
        int sjw_shift;
 
@@ -912,15 +924,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                canbtr |= ESD_USB2_3_SAMPLES;
 
-       msg.msg.hdr.len = 2;
-       msg.msg.hdr.cmd = CMD_SETBAUD;
-       msg.msg.setbaud.net = priv->index;
-       msg.msg.setbaud.rsvd = 0;
-       msg.msg.setbaud.baud = cpu_to_le32(canbtr);
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->msg.hdr.len = 2;
+       msg->msg.hdr.cmd = CMD_SETBAUD;
+       msg->msg.setbaud.net = priv->index;
+       msg->msg.setbaud.rsvd = 0;
+       msg->msg.setbaud.baud = cpu_to_le32(canbtr);
 
        netdev_info(netdev, "setting BTR=%#x\n", canbtr);
 
-       return esd_usb2_send_msg(priv->usb2, &msg);
+       err = esd_usb2_send_msg(priv->usb2, msg);
+
+       kfree(msg);
+       return err;
 }
 
 static int esd_usb2_get_berr_counter(const struct net_device *netdev,
@@ -1022,7 +1041,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
 {
        struct esd_usb2 *dev;
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
        int i, err;
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1037,27 +1056,33 @@ static int esd_usb2_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg) {
+               err = -ENOMEM;
+               goto free_msg;
+       }
+
        /* query number of CAN interfaces (nets) */
-       msg.msg.hdr.cmd = CMD_VERSION;
-       msg.msg.hdr.len = 2;
-       msg.msg.version.rsvd = 0;
-       msg.msg.version.flags = 0;
-       msg.msg.version.drv_version = 0;
+       msg->msg.hdr.cmd = CMD_VERSION;
+       msg->msg.hdr.len = 2;
+       msg->msg.version.rsvd = 0;
+       msg->msg.version.flags = 0;
+       msg->msg.version.drv_version = 0;
 
-       err = esd_usb2_send_msg(dev, &msg);
+       err = esd_usb2_send_msg(dev, msg);
        if (err < 0) {
                dev_err(&intf->dev, "sending version message failed\n");
-               goto free_dev;
+               goto free_msg;
        }
 
-       err = esd_usb2_wait_msg(dev, &msg);
+       err = esd_usb2_wait_msg(dev, msg);
        if (err < 0) {
                dev_err(&intf->dev, "no version message answer\n");
-               goto free_dev;
+               goto free_msg;
        }
 
-       dev->net_count = (int)msg.msg.version_reply.nets;
-       dev->version = le32_to_cpu(msg.msg.version_reply.version);
+       dev->net_count = (int)msg->msg.version_reply.nets;
+       dev->version = le32_to_cpu(msg->msg.version_reply.version);
 
        if (device_create_file(&intf->dev, &dev_attr_firmware))
                dev_err(&intf->dev,
@@ -1075,10 +1100,10 @@ static int esd_usb2_probe(struct usb_interface *intf,
        for (i = 0; i < dev->net_count; i++)
                esd_usb2_probe_one_net(intf, i);
 
-       return 0;
-
-free_dev:
-       kfree(dev);
+free_msg:
+       kfree(msg);
+       if (err)
+               kfree(dev);
 done:
        return err;
 }
index 45cb9f3c1324817a912c4541358b34eecce818ea..3b95465882401fc556c647071b8c27f5724a8dbf 100644 (file)
 #define KVASER_CTRL_MODE_SELFRECEPTION 3
 #define KVASER_CTRL_MODE_OFF           4
 
+/* log message */
+#define KVASER_EXTENDED_FRAME          BIT(31)
+
 struct kvaser_msg_simple {
        u8 tid;
        u8 channel;
@@ -817,8 +820,13 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
-                                 MSG_FLAG_OVERRUN)) {
+       if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
+           (msg->id == CMD_LOG_MESSAGE)) {
+               kvaser_usb_rx_error(dev, msg);
+               return;
+       } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+                                        MSG_FLAG_NERR |
+                                        MSG_FLAG_OVERRUN)) {
                kvaser_usb_rx_can_err(priv, msg);
                return;
        } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
@@ -834,22 +842,40 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
                return;
        }
 
-       cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
-                    (msg->u.rx_can.msg[1] & 0x3f);
-       cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+       if (msg->id == CMD_LOG_MESSAGE) {
+               cf->can_id = le32_to_cpu(msg->u.log_message.id);
+               if (cf->can_id & KVASER_EXTENDED_FRAME)
+                       cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+               else
+                       cf->can_id &= CAN_SFF_MASK;
 
-       if (msg->id == CMD_RX_EXT_MESSAGE) {
-               cf->can_id <<= 18;
-               cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
-                             ((msg->u.rx_can.msg[3] & 0xff) << 6) |
-                             (msg->u.rx_can.msg[4] & 0x3f);
-               cf->can_id |= CAN_EFF_FLAG;
-       }
+               cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
 
-       if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
-               cf->can_id |= CAN_RTR_FLAG;
-       else
-               memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+               if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+                       cf->can_id |= CAN_RTR_FLAG;
+               else
+                       memcpy(cf->data, &msg->u.log_message.data,
+                              cf->can_dlc);
+       } else {
+               cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+                            (msg->u.rx_can.msg[1] & 0x3f);
+
+               if (msg->id == CMD_RX_EXT_MESSAGE) {
+                       cf->can_id <<= 18;
+                       cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+                                     ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+                                     (msg->u.rx_can.msg[4] & 0x3f);
+                       cf->can_id |= CAN_EFF_FLAG;
+               }
+
+               cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+               if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+                       cf->can_id |= CAN_RTR_FLAG;
+               else
+                       memcpy(cf->data, &msg->u.rx_can.msg[6],
+                              cf->can_dlc);
+       }
 
        netif_rx(skb);
 
@@ -911,6 +937,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
 
        case CMD_RX_STD_MESSAGE:
        case CMD_RX_EXT_MESSAGE:
+       case CMD_LOG_MESSAGE:
                kvaser_usb_rx_can_msg(dev, msg);
                break;
 
@@ -919,11 +946,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
                kvaser_usb_rx_error(dev, msg);
                break;
 
-       case CMD_LOG_MESSAGE:
-               if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
-                       kvaser_usb_rx_error(dev, msg);
-               break;
-
        case CMD_TX_ACKNOWLEDGE:
                kvaser_usb_tx_acknowledge(dev, msg);
                break;
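
With this change, CMD_LOG_MESSAGE records are fed through the normal receive path instead of only being scanned for error frames. The logged identifier marks extended (29-bit) frames in bit 31 (KVASER_EXTENDED_FRAME, added at the top of this file's hunks), which happens to coincide with SocketCAN's CAN_EFF_FLAG, so the driver can simply mask with CAN_EFF_MASK | CAN_EFF_FLAG. A standalone sketch of the decode, using the constant values from <linux/can.h>:

#include <stdint.h>
#include <stdio.h>

/* values as defined by <linux/can.h> */
#define CAN_EFF_FLAG 0x80000000U   /* extended frame format */
#define CAN_SFF_MASK 0x000007FFU   /* 11-bit standard id    */
#define CAN_EFF_MASK 0x1FFFFFFFU   /* 29-bit extended id    */

#define KVASER_EXTENDED_FRAME (1U << 31)   /* bit 31 of the logged id */

static uint32_t decode_logged_id(uint32_t raw)
{
        if (raw & KVASER_EXTENDED_FRAME)
                return (raw & CAN_EFF_MASK) | CAN_EFF_FLAG;
        return raw & CAN_SFF_MASK;
}

int main(void)
{
        printf("std: 0x%08x\n", decode_logged_id(0x00000123));  /* 0x00000123 */
        printf("ext: 0x%08x\n", decode_logged_id(0x80001234));  /* 0x80001234 */
        return 0;
}
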
index 30d79bfa5b109e5d6d212df46658a4cf1216d6e1..8ee9d1556e6e4eb3b8d32cfb988ba9870fccffaa 100644 (file)
@@ -504,15 +504,24 @@ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev,
        return usb_submit_urb(urb, GFP_ATOMIC);
 }
 
-static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
 {
-       u8 buffer[16];
+       u8 *buffer;
+       int err;
+
+       buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
 
        buffer[0] = 0;
        buffer[1] = !!loaded;
 
-       pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
-                             PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer));
+       err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
+                                   PCAN_USBPRO_FCT_DRVLD, buffer,
+                                   PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
+       kfree(buffer);
+
+       return err;
 }
 
 static inline
@@ -851,21 +860,24 @@ static int pcan_usb_pro_stop(struct peak_usb_device *dev)
  */
 static int pcan_usb_pro_init(struct peak_usb_device *dev)
 {
-       struct pcan_usb_pro_interface *usb_if;
        struct pcan_usb_pro_device *pdev =
                        container_of(dev, struct pcan_usb_pro_device, dev);
+       struct pcan_usb_pro_interface *usb_if = NULL;
+       struct pcan_usb_pro_fwinfo *fi = NULL;
+       struct pcan_usb_pro_blinfo *bi = NULL;
+       int err;
 
        /* do this for 1st channel only */
        if (!dev->prev_siblings) {
-               struct pcan_usb_pro_fwinfo fi;
-               struct pcan_usb_pro_blinfo bi;
-               int err;
-
                /* allocate netdevices common structure attached to first one */
                usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface),
                                 GFP_KERNEL);
-               if (!usb_if)
-                       return -ENOMEM;
+               fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL);
+               bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL);
+               if (!usb_if || !fi || !bi) {
+                       err = -ENOMEM;
+                       goto err_out;
+               }
 
                /* number of ts msgs to ignore before taking one into account */
                usb_if->cm_ignore_count = 5;
@@ -877,34 +889,34 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
                 */
                err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
                                            PCAN_USBPRO_INFO_FW,
-                                           &fi, sizeof(fi));
+                                           fi, sizeof(*fi));
                if (err) {
-                       kfree(usb_if);
                        dev_err(dev->netdev->dev.parent,
                                "unable to read %s firmware info (err %d)\n",
                                pcan_usb_pro.name, err);
-                       return err;
+                       goto err_out;
                }
 
                err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
                                            PCAN_USBPRO_INFO_BL,
-                                           &bi, sizeof(bi));
+                                           bi, sizeof(*bi));
                if (err) {
-                       kfree(usb_if);
                        dev_err(dev->netdev->dev.parent,
                                "unable to read %s bootloader info (err %d)\n",
                                pcan_usb_pro.name, err);
-                       return err;
+                       goto err_out;
                }
 
+               /* tell the device the can driver is running */
+               err = pcan_usb_pro_drv_loaded(dev, 1);
+               if (err)
+                       goto err_out;
+
                dev_info(dev->netdev->dev.parent,
                     "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n",
                     pcan_usb_pro.name,
-                    bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo,
+                    bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo,
                     pcan_usb_pro.ctrl_count);
-
-               /* tell the device the can driver is running */
-               pcan_usb_pro_drv_loaded(dev, 1);
        } else {
                usb_if = pcan_usb_pro_dev_if(dev->prev_siblings);
        }
@@ -916,6 +928,13 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
        pcan_usb_pro_set_led(dev, 0, 1);
 
        return 0;
+
+ err_out:
+       kfree(bi);
+       kfree(fi);
+       kfree(usb_if);
+
+       return err;
 }
 
 static void pcan_usb_pro_exit(struct peak_usb_device *dev)
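
The hunks above replace the 16-byte on-stack buffer in pcan_usb_pro_drv_loaded() with a kmalloc'ed one, and turn the fwinfo/blinfo locals in pcan_usb_pro_init() into heap allocations with a common err_out unwind path, presumably because these buffers end up in USB transfers, which need DMA-capable heap memory rather than stack storage; the request path now also propagates errors instead of dropping them. A minimal sketch of the allocate/use/free pattern, reusing identifiers from the driver but with a hypothetical wrapper name:

	/* hypothetical wrapper illustrating the heap-buffer pattern above */
	static int example_send_drvld(struct peak_usb_device *dev, u8 flag)
	{
		u8 *buf;
		int err;

		buf = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;		/* nothing to undo yet */

		buf[0] = 0;
		buf[1] = flag;

		err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
					    PCAN_USBPRO_FCT_DRVLD, buf,
					    PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
		kfree(buf);			/* always freed, error or not */
		return err;
	}
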
index a869918c5620ea8977e9c12e47cd1fadb9b0cea1..32275af547e06becb9d4150030012e52c753bd16 100644 (file)
@@ -29,6 +29,7 @@
 
 /* Vendor Request value for XXX_FCT */
 #define PCAN_USBPRO_FCT_DRVLD          5 /* tell device driver is loaded */
+#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN  16
 
 /* PCAN_USBPRO_INFO_BL vendor request record type */
 struct __packed pcan_usb_pro_blinfo {
index 6e15ef08f301fe385dec2727bdc6f92dded28e89..cbd388eea68271c8ab7eaa2581ffb9145a4be429 100644 (file)
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
        err = usb_8dev_cmd_version(priv, &version);
        if (err) {
                netdev_err(netdev, "can't get firmware version\n");
-               goto cleanup_cmd_msg_buffer;
+               goto cleanup_unregister_candev;
        } else {
                netdev_info(netdev,
                         "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
 
        return 0;
 
+cleanup_unregister_candev:
+       unregister_netdev(priv->netdev);
+
 cleanup_cmd_msg_buffer:
        kfree(priv->cmd_msg_buffer);
 
index adb4bf5eb4b4166dcdaa6c7883ef01fd3deb2afe..ede8daa68275dfdefc4976e6261e29a01e93e93a 100644 (file)
@@ -723,25 +723,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
                           dev->name, skb->len, inw(ioaddr + EL3_STATUS));
        }
-#if 0
-#ifndef final_version
-       {       /* Error-checking code, delete someday. */
-               ushort status = inw(ioaddr + EL3_STATUS);
-               if (status & 0x0001 &&          /* IRQ line active, missed one. */
-                   inw(ioaddr + EL3_STATUS) & 1) {                     /* Make sure. */
-                       pr_debug("%s: Missed interrupt, status then %04x now %04x"
-                                  "  Tx %2.2x Rx %4.4x.\n", dev->name, status,
-                                  inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
-                                  inw(ioaddr + RX_STATUS));
-                       /* Fake interrupt trigger by masking, acknowledge interrupts. */
-                       outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
-                       outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
-                                ioaddr + EL3_CMD);
-                       outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
-               }
-       }
-#endif
-#endif
        /*
         *      We lock the driver against other processors. Note
         *      we don't need to lock versus the IRQ as we suspended
index de570a8f896742f5e0c345579b736d8030326356..ad5272b348f07debb61c540a909da8fa0e1593f1 100644 (file)
@@ -632,7 +632,6 @@ struct vortex_private {
                pm_state_valid:1,                               /* pci_dev->saved_config_space has sane contents */
                open:1,
                medialock:1,
-               must_free_region:1,                             /* Flag: if zero, Cardbus owns the I/O region */
                large_frames:1,                 /* accept large frames */
                handling_irq:1;                 /* private in_irq indicator */
        /* {get|set}_wol operations are already serialized by rtnl.
@@ -1012,6 +1011,10 @@ static int vortex_init_one(struct pci_dev *pdev,
        if (rc < 0)
                goto out;
 
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc < 0)
+               goto out_disable;
+
        unit = vortex_cards_found;
 
        if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
@@ -1027,21 +1030,24 @@ static int vortex_init_one(struct pci_dev *pdev,
        if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
                ioaddr = pci_iomap(pdev, 0, 0);
        if (!ioaddr) {
-               pci_disable_device(pdev);
                rc = -ENOMEM;
-               goto out;
+               goto out_release;
        }
 
        rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
                           ent->driver_data, unit);
-       if (rc < 0) {
-               pci_iounmap(pdev, ioaddr);
-               pci_disable_device(pdev);
-               goto out;
-       }
+       if (rc < 0)
+               goto out_iounmap;
 
        vortex_cards_found++;
-
+       goto out;
+
+out_iounmap:
+       pci_iounmap(pdev, ioaddr);
+out_release:
+       pci_release_regions(pdev);
+out_disable:
+       pci_disable_device(pdev);
 out:
        return rc;
 }
@@ -1178,11 +1184,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 
        /* PCI-only startup logic */
        if (pdev) {
-               /* EISA resources already marked, so only PCI needs to do this here */
-               /* Ignore return value, because Cardbus drivers already allocate for us */
-               if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
-                       vp->must_free_region = 1;
-
                /* enable bus-mastering if necessary */
                if (vci->flags & PCI_USES_MASTER)
                        pci_set_master(pdev);
@@ -1220,7 +1221,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
                                           &vp->rx_ring_dma);
        retval = -ENOMEM;
        if (!vp->rx_ring)
-               goto free_region;
+               goto free_device;
 
        vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
        vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
@@ -1471,7 +1472,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 
        if (pdev) {
                vp->pm_state_valid = 1;
-               pci_save_state(VORTEX_PCI(vp));
+               pci_save_state(pdev);
                acpi_set_WOL(dev);
        }
        retval = register_netdev(dev);
@@ -1484,9 +1485,7 @@ free_ring:
                                                        + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
                                                vp->rx_ring,
                                                vp->rx_ring_dma);
-free_region:
-       if (vp->must_free_region)
-               release_region(dev->base_addr, vci->io_size);
+free_device:
        free_netdev(dev);
        pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
 out:
@@ -3233,29 +3232,29 @@ static void vortex_remove_one(struct pci_dev *pdev)
        vp = netdev_priv(dev);
 
        if (vp->cb_fn_base)
-               pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);
+               pci_iounmap(pdev, vp->cb_fn_base);
 
        unregister_netdev(dev);
 
-       if (VORTEX_PCI(vp)) {
-               pci_set_power_state(VORTEX_PCI(vp), PCI_D0);    /* Go active */
-               if (vp->pm_state_valid)
-                       pci_restore_state(VORTEX_PCI(vp));
-               pci_disable_device(VORTEX_PCI(vp));
-       }
+       pci_set_power_state(pdev, PCI_D0);      /* Go active */
+       if (vp->pm_state_valid)
+               pci_restore_state(pdev);
+       pci_disable_device(pdev);
+
        /* Should really use issue_and_wait() here */
        iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
             vp->ioaddr + EL3_CMD);
 
-       pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);
+       pci_iounmap(pdev, vp->ioaddr);
 
        pci_free_consistent(pdev,
                                                sizeof(struct boom_rx_desc) * RX_RING_SIZE
                                                        + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
                                                vp->rx_ring,
                                                vp->rx_ring_dma);
-       if (vp->must_free_region)
-               release_region(dev->base_addr, vp->io_size);
+
+       pci_release_regions(pdev);
+
        free_netdev(dev);
 }
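
Like the usb_8dev probe fix earlier in this series, the vortex_init_one() rework above follows the usual kernel unwind idiom: each resource acquired during probe gets a goto label, and every failure jumps to the label that releases exactly what has been acquired so far, in reverse order. With pci_request_regions() now claimed centrally in vortex_init_one(), the matching pci_release_regions() lands both in that error path and in vortex_remove_one(), replacing the old per-device must_free_region bookkeeping. A generic sketch of the idiom (names are illustrative, not taken from the driver):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		void __iomem *ioaddr;
		int rc;

		rc = pci_enable_device(pdev);
		if (rc < 0)
			goto out;

		rc = pci_request_regions(pdev, "example");  /* claim all BARs at once */
		if (rc < 0)
			goto out_disable;

		ioaddr = pci_iomap(pdev, 0, 0);
		if (!ioaddr) {
			rc = -ENOMEM;
			goto out_release;   /* undo only what already succeeded */
		}

		/* further setup would go here; on success the resources
		 * stay claimed until the remove() callback runs */
		return 0;

	out_release:
		pci_release_regions(pdev);
	out_disable:
		pci_disable_device(pdev);
	out:
		return rc;
	}
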
 
index 1c71c763f68009906a1d0bb1cd3eaf636a43ba1f..f00c76377b446cb63999857138e4f0365eccc015 100644 (file)
@@ -67,7 +67,6 @@ config PCMCIA_3C589
 config VORTEX
        tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
        depends on (PCI || EISA) && HAS_IOPORT
-       select NET_CORE
        select MII
        ---help---
          This option enables driver support for a large number of 10Mbps and
index 47618e505355ae91fc0ade4e50231ec780bcc56f..b2e8405137357f2cef6303c1a9a16d96b1abb23d 100644 (file)
@@ -849,7 +849,6 @@ static int ne_drv_remove(struct platform_device *pdev)
                free_irq(dev->irq, dev);
                release_region(dev->base_addr, NE_IO_EXTENT);
                free_netdev(dev);
-               platform_set_drvdata(pdev, NULL);
        }
        return 0;
 }
index ed956e08d38b1d835f233b4f1ba7698f75447c1b..2037080c504d67e6128797f4a920891fed5a6d0e 100644 (file)
@@ -20,9 +20,11 @@ config SUNGEM_PHY
 source "drivers/net/ethernet/3com/Kconfig"
 source "drivers/net/ethernet/adaptec/Kconfig"
 source "drivers/net/ethernet/aeroflex/Kconfig"
+source "drivers/net/ethernet/allwinner/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
+source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
 source "drivers/net/ethernet/cadence/Kconfig"
 source "drivers/net/ethernet/adi/Kconfig"
@@ -63,7 +65,6 @@ config JME
        tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver supports the PCI-Express gigabit ethernet adapters
@@ -95,7 +96,6 @@ config FEALNX
        tristate "Myson MTD-8xx PCI Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
@@ -106,7 +106,6 @@ source "drivers/net/ethernet/8390/Kconfig"
 
 config NET_NETX
        tristate "NetX Ethernet support"
-       select NET_CORE
        select MII
        depends on ARCH_NETX
        ---help---
@@ -124,7 +123,6 @@ source "drivers/net/ethernet/oki-semi/Kconfig"
 config ETHOC
        tristate "OpenCores 10/100 Mbps Ethernet MAC support"
        depends on HAS_IOMEM && HAS_DMA
-       select NET_CORE
        select MII
        select PHYLIB
        select CRC32
index 8268d85f944849b904ca44b749d3b5eb72b6e363..390bd0bfaa2721b655a80f5c1f74b848f2665cd5 100644 (file)
@@ -6,9 +6,11 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
 obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
 obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
 obj-$(CONFIG_NET_CADENCE) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
index 0bff571b1bb3887dabf6516cbcb321b0f10dde90..5c804bbe3dabdeebc6dc16870e5f0b3177ed8246 100644 (file)
@@ -22,7 +22,6 @@ config ADAPTEC_STARFIRE
        tristate "Adaptec Starfire/DuraLAN support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
index a9481606bbcd713f6fa446325521b2309b64c856..f952fff6a9a9e1c6100e234fa5c085731b1c81ce 100644 (file)
@@ -23,7 +23,6 @@ config BFIN_MAC
        tristate "Blackfin on-chip MAC support"
        depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
        select CRC32
-       select NET_CORE
        select MII
        select PHYLIB
        select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
index dada66bfe0d6e018778ba24939f15b82e9468b0d..e904b3838dccef44ace7fb126c32e9cd706597a9 100644 (file)
@@ -1719,7 +1719,6 @@ out_err_mii_probe:
        mdiobus_unregister(lp->mii_bus);
        mdiobus_free(lp->mii_bus);
 out_err_probe_mac:
-       platform_set_drvdata(pdev, NULL);
        free_netdev(ndev);
 
        return rc;
@@ -1732,8 +1731,6 @@ static int bfin_mac_remove(struct platform_device *pdev)
 
        bfin_phc_release(lp);
 
-       platform_set_drvdata(pdev, NULL);
-
        lp->mii_bus->priv = NULL;
 
        unregister_netdev(ndev);
@@ -1868,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev)
        struct bfin_mii_bus_platform_data *mii_bus_pd =
                dev_get_platdata(&pdev->dev);
 
-       platform_set_drvdata(pdev, NULL);
        mdiobus_unregister(miibus);
        kfree(miibus->irq);
        mdiobus_free(miibus);
index 269295403fc48959a35be1c65430e416fc385d0b..7ff4b30d55ea8bcbd5f9a1f2c18f99ffc88bcd2b 100644 (file)
@@ -1565,7 +1565,7 @@ error1:
 
 static int greth_of_remove(struct platform_device *of_dev)
 {
-       struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
+       struct net_device *ndev = platform_get_drvdata(of_dev);
        struct greth_private *greth = netdev_priv(ndev);
 
        /* Free descriptor areas */
@@ -1573,8 +1573,6 @@ static int greth_of_remove(struct platform_device *of_dev)
 
        dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
 
-       dev_set_drvdata(&of_dev->dev, NULL);
-
        if (greth->phy)
                phy_stop(greth->phy);
        mdiobus_unregister(greth->mdio);
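
Several hunks in this area drop platform_set_drvdata(pdev, NULL) / dev_set_drvdata(..., NULL) calls from remove() and error paths, and the greth change above swaps dev_get_drvdata(&of_dev->dev) for platform_get_drvdata(of_dev). Both rest on the same two points: the platform helpers are thin wrappers around the struct device drvdata accessors (roughly as sketched below), and the driver core now resets drvdata when a device is unbound, so drivers no longer need to clear it by hand.

	/* approximate shape of the helpers in <linux/platform_device.h> */
	static inline void *platform_get_drvdata(const struct platform_device *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}

	static inline void platform_set_drvdata(struct platform_device *pdev,
						void *data)
	{
		dev_set_drvdata(&pdev->dev, data);
	}
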
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
new file mode 100644 (file)
index 0000000..53ad213
--- /dev/null
@@ -0,0 +1,35 @@
+#
+# Allwinner device configuration
+#
+
+config NET_VENDOR_ALLWINNER
+       bool "Allwinner devices"
+       default y
+       depends on ARCH_SUNXI
+       ---help---
+         If you have a network (Ethernet) card belonging to this
+        class, say Y and read the Ethernet-HOWTO, available from
+        <http://www.tldp.org/docs.html#howto>.
+
+        Note that the answer to this question doesn't directly
+        affect the kernel: saying N will just cause the configurator
+        to skip all the questions about Allwinner cards. If you say Y,
+        you will be asked for your specific card in the following
+        questions.
+
+if NET_VENDOR_ALLWINNER
+
+config SUN4I_EMAC
+        tristate "Allwinner A10 EMAC support"
+       depends on ARCH_SUNXI
+       depends on OF
+       select CRC32
+       select MII
+       select PHYLIB
+        ---help---
+          Support for Allwinner A10 EMAC ethernet driver.
+
+          To compile this driver as a module, choose M here.  The module
+          will be called sun4i-emac.
+
+endif # NET_VENDOR_ALLWINNER
diff --git a/drivers/net/ethernet/allwinner/Makefile b/drivers/net/ethernet/allwinner/Makefile
new file mode 100644 (file)
index 0000000..03129f7
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Allwinner device drivers.
+#
+
+obj-$(CONFIG_SUN4I_EMAC) += sun4i-emac.o
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
new file mode 100644 (file)
index 0000000..50b853a
--- /dev/null
@@ -0,0 +1,954 @@
+/*
+ * Allwinner EMAC Fast Ethernet driver for Linux.
+ *
+ * Copyright 2012-2013 Stefan Roese <sr@denx.de>
+ * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on the Linux driver provided by Allwinner:
+ * Copyright (C) 1997  Sten Wang
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+
+#include "sun4i-emac.h"
+
+#define DRV_NAME               "sun4i-emac"
+#define DRV_VERSION            "1.02"
+
+#define EMAC_MAX_FRAME_LEN     0x0600
+
+/* Transmit timeout, default 5 seconds. */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+/* EMAC register address locking.
+ *
+ * The EMAC uses an address register to control where data written
+ * to the data register goes. This means that the address register
+ * must be preserved over interrupts or similar calls.
+ *
+ * During interrupt and other critical calls, a spinlock is used to
+ * protect the system, but the calls themselves save the address
+ * in the address register in case they are interrupting another
+ * access to the device.
+ *
+ * For general accesses a lock is provided so that calls which are
+ * allowed to sleep are serialised so that the address register does
+ * not need to be saved. This lock also serves to serialise access
+ * to the EEPROM and PHY access registers which are shared between
+ * these two devices.
+ */
+
+/* The driver supports the original EMACE, and now the two newer
+ * devices, EMACA and EMACB.
+ */
+
+struct emac_board_info {
+       struct clk              *clk;
+       struct device           *dev;
+       struct platform_device  *pdev;
+       spinlock_t              lock;
+       void __iomem            *membase;
+       u32                     msg_enable;
+       struct net_device       *ndev;
+       struct sk_buff          *skb_last;
+       u16                     tx_fifo_stat;
+
+       int                     emacrx_completed_flag;
+
+       struct phy_device       *phy_dev;
+       struct device_node      *phy_node;
+       unsigned int            link;
+       unsigned int            speed;
+       unsigned int            duplex;
+
+       phy_interface_t         phy_interface;
+};
+
+static void emac_update_speed(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       unsigned int reg_val;
+
+       /* set EMAC SPEED, depend on PHY  */
+       reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
+       reg_val &= ~(0x1 << 8);
+       if (db->speed == SPEED_100)
+               reg_val |= 1 << 8;
+       writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
+}
+
+static void emac_update_duplex(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       unsigned int reg_val;
+
+       /* set duplex depend on phy */
+       reg_val = readl(db->membase + EMAC_MAC_CTL1_REG);
+       reg_val &= ~EMAC_MAC_CTL1_DUPLEX_EN;
+       if (db->duplex)
+               reg_val |= EMAC_MAC_CTL1_DUPLEX_EN;
+       writel(reg_val, db->membase + EMAC_MAC_CTL1_REG);
+}
+
+static void emac_handle_link_change(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       struct phy_device *phydev = db->phy_dev;
+       unsigned long flags;
+       int status_change = 0;
+
+       if (phydev->link) {
+               if (db->speed != phydev->speed) {
+                       spin_lock_irqsave(&db->lock, flags);
+                       db->speed = phydev->speed;
+                       emac_update_speed(dev);
+                       spin_unlock_irqrestore(&db->lock, flags);
+                       status_change = 1;
+               }
+
+               if (db->duplex != phydev->duplex) {
+                       spin_lock_irqsave(&db->lock, flags);
+                       db->duplex = phydev->duplex;
+                       emac_update_duplex(dev);
+                       spin_unlock_irqrestore(&db->lock, flags);
+                       status_change = 1;
+               }
+       }
+
+       if (phydev->link != db->link) {
+               if (!phydev->link) {
+                       db->speed = 0;
+                       db->duplex = -1;
+               }
+               db->link = phydev->link;
+
+               status_change = 1;
+       }
+
+       if (status_change)
+               phy_print_status(phydev);
+}
+
+static int emac_mdio_probe(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+
+       /* to-do: PHY interrupts are currently not supported */
+
+       /* attach the mac to the phy */
+       db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
+                                    &emac_handle_link_change, 0,
+                                    db->phy_interface);
+       if (!db->phy_dev) {
+               netdev_err(db->ndev, "could not find the PHY\n");
+               return -ENODEV;
+       }
+
+       /* mask with MAC supported features */
+       db->phy_dev->supported &= PHY_BASIC_FEATURES;
+       db->phy_dev->advertising = db->phy_dev->supported;
+
+       db->link = 0;
+       db->speed = 0;
+       db->duplex = -1;
+
+       return 0;
+}
+
+static void emac_mdio_remove(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+
+       phy_disconnect(db->phy_dev);
+       db->phy_dev = NULL;
+}
+
+static void emac_reset(struct emac_board_info *db)
+{
+       dev_dbg(db->dev, "resetting device\n");
+
+       /* RESET device */
+       writel(0, db->membase + EMAC_CTL_REG);
+       udelay(200);
+       writel(EMAC_CTL_RESET, db->membase + EMAC_CTL_REG);
+       udelay(200);
+}
+
+static void emac_outblk_32bit(void __iomem *reg, void *data, int count)
+{
+       writesl(reg, data, round_up(count, 4) / 4);
+}
+
+static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
+{
+       readsl(reg, data, round_up(count, 4) / 4);
+}
+
+static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct emac_board_info *dm = netdev_priv(dev);
+       struct phy_device *phydev = dm->phy_dev;
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+/* ethtool ops */
+static void emac_get_drvinfo(struct net_device *dev,
+                             struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME));
+       strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
+       strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+}
+
+static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct emac_board_info *dm = netdev_priv(dev);
+       struct phy_device *phydev = dm->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_gset(phydev, cmd);
+}
+
+static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct emac_board_info *dm = netdev_priv(dev);
+       struct phy_device *phydev = dm->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_sset(phydev, cmd);
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+       .get_drvinfo    = emac_get_drvinfo,
+       .get_settings   = emac_get_settings,
+       .set_settings   = emac_set_settings,
+       .get_link       = ethtool_op_get_link,
+};
+
+static unsigned int emac_setup(struct net_device *ndev)
+{
+       struct emac_board_info *db = netdev_priv(ndev);
+       unsigned int reg_val;
+
+       /* set up TX */
+       reg_val = readl(db->membase + EMAC_TX_MODE_REG);
+
+       writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN,
+               db->membase + EMAC_TX_MODE_REG);
+
+       /* set up RX */
+       reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+
+       writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
+               EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
+               EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
+               EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
+               db->membase + EMAC_RX_CTL_REG);
+
+       /* set MAC */
+       /* set MAC CTL0 */
+       reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
+       writel(reg_val | EMAC_MAC_CTL0_RX_FLOW_CTL_EN |
+               EMAC_MAC_CTL0_TX_FLOW_CTL_EN,
+               db->membase + EMAC_MAC_CTL0_REG);
+
+       /* set MAC CTL1 */
+       reg_val = readl(db->membase + EMAC_MAC_CTL1_REG);
+       reg_val |= EMAC_MAC_CTL1_LEN_CHECK_EN;
+       reg_val |= EMAC_MAC_CTL1_CRC_EN;
+       reg_val |= EMAC_MAC_CTL1_PAD_EN;
+       writel(reg_val, db->membase + EMAC_MAC_CTL1_REG);
+
+       /* set up IPGT */
+       writel(EMAC_MAC_IPGT_FULL_DUPLEX, db->membase + EMAC_MAC_IPGT_REG);
+
+       /* set up IPGR */
+       writel((EMAC_MAC_IPGR_IPG1 << 8) | EMAC_MAC_IPGR_IPG2,
+               db->membase + EMAC_MAC_IPGR_REG);
+
+       /* set up Collision window */
+       writel((EMAC_MAC_CLRT_COLLISION_WINDOW << 8) | EMAC_MAC_CLRT_RM,
+               db->membase + EMAC_MAC_CLRT_REG);
+
+       /* set up Max Frame Length */
+       writel(EMAC_MAX_FRAME_LEN,
+               db->membase + EMAC_MAC_MAXF_REG);
+
+       return 0;
+}
+
+static unsigned int emac_powerup(struct net_device *ndev)
+{
+       struct emac_board_info *db = netdev_priv(ndev);
+       unsigned int reg_val;
+
+       /* initial EMAC */
+       /* flush RX FIFO */
+       reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+       reg_val |= 0x8;
+       writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+       udelay(1);
+
+       /* initial MAC */
+       /* soft reset MAC */
+       reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
+       reg_val &= ~EMAC_MAC_CTL0_SOFT_RESET;
+       writel(reg_val, db->membase + EMAC_MAC_CTL0_REG);
+
+       /* set MII clock */
+       reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
+       reg_val &= (~(0xf << 2));
+       reg_val |= (0xD << 2);
+       writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
+
+       /* clear RX counter */
+       writel(0x0, db->membase + EMAC_RX_FBC_REG);
+
+       /* disable all interrupt and clear interrupt status */
+       writel(0, db->membase + EMAC_INT_CTL_REG);
+       reg_val = readl(db->membase + EMAC_INT_STA_REG);
+       writel(reg_val, db->membase + EMAC_INT_STA_REG);
+
+       udelay(1);
+
+       /* set up EMAC */
+       emac_setup(ndev);
+
+       /* set mac_address to chip */
+       writel(ndev->dev_addr[0] << 16 | ndev->dev_addr[1] << 8 | ndev->
+              dev_addr[2], db->membase + EMAC_MAC_A1_REG);
+       writel(ndev->dev_addr[3] << 16 | ndev->dev_addr[4] << 8 | ndev->
+              dev_addr[5], db->membase + EMAC_MAC_A0_REG);
+
+       mdelay(1);
+
+       return 0;
+}
+
+static int emac_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+       struct emac_board_info *db = netdev_priv(dev);
+
+       if (netif_running(dev))
+               return -EBUSY;
+
+       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+       writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev->
+              dev_addr[2], db->membase + EMAC_MAC_A1_REG);
+       writel(dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev->
+              dev_addr[5], db->membase + EMAC_MAC_A0_REG);
+
+       return 0;
+}
+
+/* Initialize emac board */
+static void emac_init_device(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned int reg_val;
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       emac_update_speed(dev);
+       emac_update_duplex(dev);
+
+       /* enable RX/TX */
+       reg_val = readl(db->membase + EMAC_CTL_REG);
+       writel(reg_val | EMAC_CTL_RESET | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN,
+               db->membase + EMAC_CTL_REG);
+
+       /* enable RX/TX0/RX Hlevel interrupt */
+       reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+       reg_val |= (0xf << 0) | (0x01 << 8);
+       writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/* Our watchdog timed out. Called by the networking layer */
+static void emac_timeout(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       unsigned long flags;
+
+       if (netif_msg_timer(db))
+               dev_err(db->dev, "tx time out.\n");
+
+       /* Save previous register address */
+       spin_lock_irqsave(&db->lock, flags);
+
+       netif_stop_queue(dev);
+       emac_reset(db);
+       emac_init_device(dev);
+       /* We can accept TX packets again */
+       dev->trans_start = jiffies;
+       netif_wake_queue(dev);
+
+       /* Restore previous register address */
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/* Hardware start transmission.
+ * Send a packet to media from the upper layer.
+ */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       unsigned long channel;
+       unsigned long flags;
+
+       channel = db->tx_fifo_stat & 3;
+       if (channel == 3)
+               return 1;
+
+       channel = (channel == 1 ? 1 : 0);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       writel(channel, db->membase + EMAC_TX_INS_REG);
+
+       emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG,
+                       skb->data, skb->len);
+       dev->stats.tx_bytes += skb->len;
+
+       db->tx_fifo_stat |= 1 << channel;
+       /* TX control: first packet is sent immediately, the second is queued */
+       if (channel == 0) {
+               /* set TX len */
+               writel(skb->len, db->membase + EMAC_TX_PL0_REG);
+               /* start transmitting from the fifo to the phy */
+               writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1,
+                      db->membase + EMAC_TX_CTL0_REG);
+
+               /* save the time stamp */
+               dev->trans_start = jiffies;
+       } else if (channel == 1) {
+               /* set TX len */
+               writel(skb->len, db->membase + EMAC_TX_PL1_REG);
+               /* start transmitting from the fifo to the phy */
+               writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1,
+                      db->membase + EMAC_TX_CTL1_REG);
+
+               /* save the time stamp */
+               dev->trans_start = jiffies;
+       }
+
+       if ((db->tx_fifo_stat & 3) == 3) {
+               /* Second packet */
+               netif_stop_queue(dev);
+       }
+
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       /* free this SKB */
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+/* EMAC interrupt handler
+ * pass received packets to the upper layer and free transmitted ones
+ */
+static void emac_tx_done(struct net_device *dev, struct emac_board_info *db,
+                         unsigned int tx_status)
+{
+       /* One packet sent complete */
+       db->tx_fifo_stat &= ~(tx_status & 3);
+       if (3 == (tx_status & 3))
+               dev->stats.tx_packets += 2;
+       else
+               dev->stats.tx_packets++;
+
+       if (netif_msg_tx_done(db))
+               dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
+
+       netif_wake_queue(dev);
+}
+
+/* Receive a packet and pass it to the upper layer
+ */
+static void emac_rx(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       struct sk_buff *skb;
+       u8 *rdptr;
+       bool good_packet;
+       static int rxlen_last;
+       unsigned int reg_val;
+       u32 rxhdr, rxstatus, rxcount, rxlen;
+
+       /* Check packet ready or not */
+       while (1) {
+               /* race warning: the first packet might arrive with
+                * the interrupts disabled, but the second will fix
+                * it
+                */
+               rxcount = readl(db->membase + EMAC_RX_FBC_REG);
+
+               if (netif_msg_rx_status(db))
+                       dev_dbg(db->dev, "RXCount: %x\n", rxcount);
+
+               if ((db->skb_last != NULL) && (rxlen_last > 0)) {
+                       dev->stats.rx_bytes += rxlen_last;
+
+                       /* Pass to upper layer */
+                       db->skb_last->protocol = eth_type_trans(db->skb_last,
+                                                               dev);
+                       netif_rx(db->skb_last);
+                       dev->stats.rx_packets++;
+                       db->skb_last = NULL;
+                       rxlen_last = 0;
+
+                       reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+                       reg_val &= ~EMAC_RX_CTL_DMA_EN;
+                       writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+               }
+
+               if (!rxcount) {
+                       db->emacrx_completed_flag = 1;
+                       reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+                       reg_val |= (0xf << 0) | (0x01 << 8);
+                       writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+                       /* had one stuck? */
+                       rxcount = readl(db->membase + EMAC_RX_FBC_REG);
+                       if (!rxcount)
+                               return;
+               }
+
+               reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG);
+               if (netif_msg_rx_status(db))
+                       dev_dbg(db->dev, "receive header: %x\n", reg_val);
+               if (reg_val != EMAC_UNDOCUMENTED_MAGIC) {
+                       /* disable RX */
+                       reg_val = readl(db->membase + EMAC_CTL_REG);
+                       writel(reg_val & ~EMAC_CTL_RX_EN,
+                              db->membase + EMAC_CTL_REG);
+
+                       /* Flush RX FIFO */
+                       reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+                       writel(reg_val | (1 << 3),
+                              db->membase + EMAC_RX_CTL_REG);
+
+                       do {
+                               reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+                       } while (reg_val & (1 << 3));
+
+                       /* enable RX */
+                       reg_val = readl(db->membase + EMAC_CTL_REG);
+                       writel(reg_val | EMAC_CTL_RX_EN,
+                              db->membase + EMAC_CTL_REG);
+                       reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+                       reg_val |= (0xf << 0) | (0x01 << 8);
+                       writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+                       db->emacrx_completed_flag = 1;
+
+                       return;
+               }
+
+               /* A packet is ready now; get its status and length */
+               good_packet = true;
+
+               emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
+                               &rxhdr, sizeof(rxhdr));
+
+               if (netif_msg_rx_status(db))
+                       dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr)));
+
+               rxlen = EMAC_RX_IO_DATA_LEN(rxhdr);
+               rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr);
+
+               if (netif_msg_rx_status(db))
+                       dev_dbg(db->dev, "RX: status %02x, length %04x\n",
+                               rxstatus, rxlen);
+
+               /* Packet Status check */
+               if (rxlen < 0x40) {
+                       good_packet = false;
+                       if (netif_msg_rx_err(db))
+                               dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
+               }
+
+               if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) {
+                       good_packet = false;
+
+                       if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) {
+                               if (netif_msg_rx_err(db))
+                                       dev_dbg(db->dev, "crc error\n");
+                               dev->stats.rx_crc_errors++;
+                       }
+
+                       if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) {
+                               if (netif_msg_rx_err(db))
+                                       dev_dbg(db->dev, "length error\n");
+                               dev->stats.rx_length_errors++;
+                       }
+               }
+
+               /* Move data from EMAC */
+               skb = dev_alloc_skb(rxlen + 4);
+               if (good_packet && skb) {
+                       skb_reserve(skb, 2);
+                       rdptr = (u8 *) skb_put(skb, rxlen - 4);
+
+                       /* Read received packet from RX SRAM */
+                       if (netif_msg_rx_status(db))
+                               dev_dbg(db->dev, "RxLen %x\n", rxlen);
+
+                       emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
+                                       rdptr, rxlen);
+                       dev->stats.rx_bytes += rxlen;
+
+                       /* Pass to upper layer */
+                       skb->protocol = eth_type_trans(skb, dev);
+                       netif_rx(skb);
+                       dev->stats.rx_packets++;
+               }
+       }
+}
+
+static irqreturn_t emac_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct emac_board_info *db = netdev_priv(dev);
+       int int_status;
+       unsigned long flags;
+       unsigned int reg_val;
+
+       /* A real interrupt coming */
+
+       /* holders of db->lock must always block IRQs */
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Disable all interrupts */
+       writel(0, db->membase + EMAC_INT_CTL_REG);
+
+       /* Got EMAC interrupt status */
+       /* Got ISR */
+       int_status = readl(db->membase + EMAC_INT_STA_REG);
+       /* Clear ISR status */
+       writel(int_status, db->membase + EMAC_INT_STA_REG);
+
+       if (netif_msg_intr(db))
+               dev_dbg(db->dev, "emac interrupt %02x\n", int_status);
+
+       /* Received the coming packet */
+       /* A packet has been received */
+               /* carrier lost */
+               db->emacrx_completed_flag = 0;
+               emac_rx(dev);
+       }
+
+       /* Transmit Interrupt check */
+       if (int_status & (0x01 | 0x02))
+               emac_tx_done(dev, db, int_status);
+
+       if (int_status & (0x04 | 0x08))
+               netdev_info(dev, " ab : %x\n", int_status);
+
+       /* Re-enable interrupt mask */
+       if (db->emacrx_completed_flag == 1) {
+               reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+               reg_val |= (0xf << 0) | (0x01 << 8);
+               writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+       }
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Used by netconsole
+ */
+static void emac_poll_controller(struct net_device *dev)
+{
+       disable_irq(dev->irq);
+       emac_interrupt(dev->irq, dev);
+       enable_irq(dev->irq);
+}
+#endif
+
+/*  Open the interface.
+ *  The interface is opened whenever "ifconfig" activates it.
+ */
+static int emac_open(struct net_device *dev)
+{
+       struct emac_board_info *db = netdev_priv(dev);
+       int ret;
+
+       if (netif_msg_ifup(db))
+               dev_dbg(db->dev, "enabling %s\n", dev->name);
+
+       if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
+                            0, dev->name, dev))
+               return -EAGAIN;
+
+       /* Initialize EMAC board */
+       emac_reset(db);
+       emac_init_device(dev);
+
+       ret = emac_mdio_probe(dev);
+       if (ret < 0) {
+               netdev_err(dev, "cannot probe MDIO bus\n");
+               return ret;
+       }
+
+       phy_start(db->phy_dev);
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+static void emac_shutdown(struct net_device *dev)
+{
+       unsigned int reg_val;
+       struct emac_board_info *db = netdev_priv(dev);
+
+       /* Disable all interrupts */
+       writel(0, db->membase + EMAC_INT_CTL_REG);
+
+       /* clear interrupt status */
+       reg_val = readl(db->membase + EMAC_INT_STA_REG);
+       writel(reg_val, db->membase + EMAC_INT_STA_REG);
+
+       /* Disable RX/TX */
+       reg_val = readl(db->membase + EMAC_CTL_REG);
+       reg_val &= ~(EMAC_CTL_TX_EN | EMAC_CTL_RX_EN | EMAC_CTL_RESET);
+       writel(reg_val, db->membase + EMAC_CTL_REG);
+}
+
+/* Stop the interface.
+ * The interface is stopped when it is brought down.
+ */
+static int emac_stop(struct net_device *ndev)
+{
+       struct emac_board_info *db = netdev_priv(ndev);
+
+       if (netif_msg_ifdown(db))
+               dev_dbg(db->dev, "shutting down %s\n", ndev->name);
+
+       netif_stop_queue(ndev);
+       netif_carrier_off(ndev);
+
+       phy_stop(db->phy_dev);
+
+       emac_mdio_remove(ndev);
+
+       emac_shutdown(ndev);
+
+       return 0;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+       .ndo_open               = emac_open,
+       .ndo_stop               = emac_stop,
+       .ndo_start_xmit         = emac_start_xmit,
+       .ndo_tx_timeout         = emac_timeout,
+       .ndo_do_ioctl           = emac_ioctl,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = emac_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = emac_poll_controller,
+#endif
+};
+
+/* Probe for the EMAC device, allocate space and register it
+ */
+static int emac_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct emac_board_info *db;
+       struct net_device *ndev;
+       int ret = 0;
+       const char *mac_addr;
+
+       ndev = alloc_etherdev(sizeof(struct emac_board_info));
+       if (!ndev) {
+               dev_err(&pdev->dev, "could not allocate device.\n");
+               return -ENOMEM;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       db = netdev_priv(ndev);
+       memset(db, 0, sizeof(*db));
+
+       db->dev = &pdev->dev;
+       db->ndev = ndev;
+       db->pdev = pdev;
+
+       spin_lock_init(&db->lock);
+
+       db->membase = of_iomap(np, 0);
+       if (!db->membase) {
+               dev_err(&pdev->dev, "failed to remap registers\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* fill in parameters for net-dev structure */
+       ndev->base_addr = (unsigned long)db->membase;
+       ndev->irq = irq_of_parse_and_map(np, 0);
+       if (ndev->irq == -ENXIO) {
+               netdev_err(ndev, "No irq resource\n");
+               ret = ndev->irq;
+               goto out;
+       }
+
+       db->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(db->clk))
+               goto out;
+
+       clk_prepare_enable(db->clk);
+
+       db->phy_node = of_parse_phandle(np, "phy", 0);
+       if (!db->phy_node) {
+               dev_err(&pdev->dev, "no associated PHY\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       /* Read MAC-address from DT */
+       mac_addr = of_get_mac_address(np);
+       if (mac_addr)
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+
+       /* Check if the MAC address is valid, if not get a random one */
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
+               eth_hw_addr_random(ndev);
+               dev_warn(&pdev->dev, "using random MAC address %pM\n",
+                        ndev->dev_addr);
+       }
+
+       db->emacrx_completed_flag = 1;
+       emac_powerup(ndev);
+       emac_reset(db);
+
+       ether_setup(ndev);
+
+       ndev->netdev_ops = &emac_netdev_ops;
+       ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+       ndev->ethtool_ops = &emac_ethtool_ops;
+
+       platform_set_drvdata(pdev, ndev);
+
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(ndev);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "Registering netdev failed!\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n",
+                ndev->name, db->membase, ndev->irq, ndev->dev_addr);
+
+       return 0;
+
+out:
+       dev_err(db->dev, "not found (%d).\n", ret);
+
+       free_netdev(ndev);
+
+       return ret;
+}
+
+static int emac_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+
+       unregister_netdev(ndev);
+       free_netdev(ndev);
+
+       dev_dbg(&pdev->dev, "released and freed device\n");
+       return 0;
+}
+
+static int emac_suspend(struct platform_device *dev, pm_message_t state)
+{
+       struct net_device *ndev = platform_get_drvdata(dev);
+
+       netif_carrier_off(ndev);
+       netif_device_detach(ndev);
+       emac_shutdown(ndev);
+
+       return 0;
+}
+
+static int emac_resume(struct platform_device *dev)
+{
+       struct net_device *ndev = platform_get_drvdata(dev);
+       struct emac_board_info *db = netdev_priv(ndev);
+
+       emac_reset(db);
+       emac_init_device(ndev);
+       netif_device_attach(ndev);
+
+       return 0;
+}
+
+static const struct of_device_id emac_of_match[] = {
+       {.compatible = "allwinner,sun4i-emac",},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+static struct platform_driver emac_driver = {
+       .driver = {
+               .name = "sun4i-emac",
+               .of_match_table = emac_of_match,
+       },
+       .probe = emac_probe,
+       .remove = emac_remove,
+       .suspend = emac_suspend,
+       .resume = emac_resume,
+};
+
+module_platform_driver(emac_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 emac network driver");
+MODULE_LICENSE("GPL");
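
The PHY handling in the new driver is the stock phylib pattern: connect to the PHY named by the "phy" phandle with of_phy_connect() plus a link-change callback, start it when the interface is opened, and stop and disconnect it when the interface goes down. Condensed here from the functions above into one place, with error handling trimmed, so treat it as a sketch rather than the driver itself:

	/* lifecycle sketch of the phylib hookup used by sun4i-emac */
	static int sketch_attach_phy(struct emac_board_info *db)
	{
		db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
					     emac_handle_link_change, 0,
					     db->phy_interface);
		if (!db->phy_dev)
			return -ENODEV;

		phy_start(db->phy_dev);		/* phylib now drives link state */
		return 0;
	}

	static void sketch_detach_phy(struct emac_board_info *db)
	{
		phy_stop(db->phy_dev);		/* stop the PHY state machine... */
		phy_disconnect(db->phy_dev);	/* ...then detach from it */
		db->phy_dev = NULL;
	}
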
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
new file mode 100644 (file)
index 0000000..38c72d9
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Allwinner EMAC Fast Ethernet driver for Linux.
+ *
+ * Copyright 2012 Stefan Roese <sr@denx.de>
+ * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on the Linux driver provided by Allwinner:
+ * Copyright (C) 1997  Sten Wang
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _SUN4I_EMAC_H_
+#define _SUN4I_EMAC_H_
+
+#define EMAC_CTL_REG           (0x00)
+#define EMAC_CTL_RESET                 (1 << 0)
+#define EMAC_CTL_TX_EN                 (1 << 1)
+#define EMAC_CTL_RX_EN                 (1 << 2)
+#define EMAC_TX_MODE_REG       (0x04)
+#define EMAC_TX_MODE_ABORTED_FRAME_EN  (1 << 0)
+#define EMAC_TX_MODE_DMA_EN            (1 << 1)
+#define EMAC_TX_FLOW_REG       (0x08)
+#define EMAC_TX_CTL0_REG       (0x0c)
+#define EMAC_TX_CTL1_REG       (0x10)
+#define EMAC_TX_INS_REG                (0x14)
+#define EMAC_TX_PL0_REG                (0x18)
+#define EMAC_TX_PL1_REG                (0x1c)
+#define EMAC_TX_STA_REG                (0x20)
+#define EMAC_TX_IO_DATA_REG    (0x24)
+#define EMAC_TX_IO_DATA1_REG   (0x28)
+#define EMAC_TX_TSVL0_REG      (0x2c)
+#define EMAC_TX_TSVH0_REG      (0x30)
+#define EMAC_TX_TSVL1_REG      (0x34)
+#define EMAC_TX_TSVH1_REG      (0x38)
+#define EMAC_RX_CTL_REG                (0x3c)
+#define EMAC_RX_CTL_AUTO_DRQ_EN                (1 << 1)
+#define EMAC_RX_CTL_DMA_EN             (1 << 2)
+#define EMAC_RX_CTL_PASS_ALL_EN                (1 << 4)
+#define EMAC_RX_CTL_PASS_CTL_EN                (1 << 5)
+#define EMAC_RX_CTL_PASS_CRC_ERR_EN    (1 << 6)
+#define EMAC_RX_CTL_PASS_LEN_ERR_EN    (1 << 7)
+#define EMAC_RX_CTL_PASS_LEN_OOR_EN    (1 << 8)
+#define EMAC_RX_CTL_ACCEPT_UNICAST_EN  (1 << 16)
+#define EMAC_RX_CTL_DA_FILTER_EN       (1 << 17)
+#define EMAC_RX_CTL_ACCEPT_MULTICAST_EN        (1 << 20)
+#define EMAC_RX_CTL_HASH_FILTER_EN     (1 << 21)
+#define EMAC_RX_CTL_ACCEPT_BROADCAST_EN        (1 << 22)
+#define EMAC_RX_CTL_SA_FILTER_EN       (1 << 24)
+#define EMAC_RX_CTL_SA_FILTER_INVERT_EN        (1 << 25)
+#define EMAC_RX_HASH0_REG      (0x40)
+#define EMAC_RX_HASH1_REG      (0x44)
+#define EMAC_RX_STA_REG                (0x48)
+#define EMAC_RX_IO_DATA_REG    (0x4c)
+#define EMAC_RX_IO_DATA_LEN(x)         (x & 0xffff)
+#define EMAC_RX_IO_DATA_STATUS(x)      ((x >> 16) & 0xffff)
+#define EMAC_RX_IO_DATA_STATUS_CRC_ERR (1 << 4)
+#define EMAC_RX_IO_DATA_STATUS_LEN_ERR (3 << 5)
+#define EMAC_RX_IO_DATA_STATUS_OK      (1 << 7)
+#define EMAC_RX_FBC_REG                (0x50)
+#define EMAC_INT_CTL_REG       (0x54)
+#define EMAC_INT_STA_REG       (0x58)
+#define EMAC_MAC_CTL0_REG      (0x5c)
+#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN   (1 << 2)
+#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN   (1 << 3)
+#define EMAC_MAC_CTL0_SOFT_RESET       (1 << 15)
+#define EMAC_MAC_CTL1_REG      (0x60)
+#define EMAC_MAC_CTL1_DUPLEX_EN                (1 << 0)
+#define EMAC_MAC_CTL1_LEN_CHECK_EN     (1 << 1)
+#define EMAC_MAC_CTL1_HUGE_FRAME_EN    (1 << 2)
+#define EMAC_MAC_CTL1_DELAYED_CRC_EN   (1 << 3)
+#define EMAC_MAC_CTL1_CRC_EN           (1 << 4)
+#define EMAC_MAC_CTL1_PAD_EN           (1 << 5)
+#define EMAC_MAC_CTL1_PAD_CRC_EN       (1 << 6)
+#define EMAC_MAC_CTL1_AD_SHORT_FRAME_EN        (1 << 7)
+#define EMAC_MAC_CTL1_BACKOFF_DIS      (1 << 12)
+#define EMAC_MAC_IPGT_REG      (0x64)
+#define EMAC_MAC_IPGT_HALF_DUPLEX      (0x12)
+#define EMAC_MAC_IPGT_FULL_DUPLEX      (0x15)
+#define EMAC_MAC_IPGR_REG      (0x68)
+#define EMAC_MAC_IPGR_IPG1             (0x0c)
+#define EMAC_MAC_IPGR_IPG2             (0x12)
+#define EMAC_MAC_CLRT_REG      (0x6c)
+#define EMAC_MAC_CLRT_COLLISION_WINDOW (0x37)
+#define EMAC_MAC_CLRT_RM               (0x0f)
+#define EMAC_MAC_MAXF_REG      (0x70)
+#define EMAC_MAC_SUPP_REG      (0x74)
+#define EMAC_MAC_TEST_REG      (0x78)
+#define EMAC_MAC_MCFG_REG      (0x7c)
+#define EMAC_MAC_A0_REG                (0x98)
+#define EMAC_MAC_A1_REG                (0x9c)
+#define EMAC_MAC_A2_REG                (0xa0)
+#define EMAC_SAFX_L_REG0       (0xa4)
+#define EMAC_SAFX_H_REG0       (0xa8)
+#define EMAC_SAFX_L_REG1       (0xac)
+#define EMAC_SAFX_H_REG1       (0xb0)
+#define EMAC_SAFX_L_REG2       (0xb4)
+#define EMAC_SAFX_H_REG2       (0xb8)
+#define EMAC_SAFX_L_REG3       (0xbc)
+#define EMAC_SAFX_H_REG3       (0xc0)
+
+#define EMAC_PHY_DUPLEX                (1 << 8)
+
+#define EMAC_EEPROM_MAGIC      (0x444d394b)
+#define EMAC_UNDOCUMENTED_MAGIC        (0x0143414d)
+#endif /* _SUN4I_EMAC_H_ */
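
The driver touches these registers almost exclusively through a read-modify-write sequence taken under db->lock: readl() the register, clear or set the relevant mask bits, writel() the result back. A tiny illustration of that pattern with a hypothetical helper; the register and bit macros are the real ones from this header:

	/* hypothetical helper showing the read-modify-write access pattern */
	static void emac_rmw(void __iomem *membase, unsigned int reg,
			     u32 clear, u32 set)
	{
		u32 val = readl(membase + reg);

		val &= ~clear;
		val |= set;
		writel(val, membase + reg);
	}

	/* e.g. force full duplex, as emac_update_duplex() does:
	 *	emac_rmw(db->membase, EMAC_MAC_CTL1_REG, 0, EMAC_MAC_CTL1_DUPLEX_EN);
	 */
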
index b7894f8af9d1969e31410b0ffb3784c774a88275..219be1bf3cfccd60db67ac0515f5e49c65def9e0 100644 (file)
@@ -702,19 +702,6 @@ static struct pci_driver acenic_pci_driver = {
        .remove         = acenic_remove_one,
 };
 
-static int __init acenic_init(void)
-{
-       return pci_register_driver(&acenic_pci_driver);
-}
-
-static void __exit acenic_exit(void)
-{
-       pci_unregister_driver(&acenic_pci_driver);
-}
-
-module_init(acenic_init);
-module_exit(acenic_exit);
-
 static void ace_free_descriptors(struct net_device *dev)
 {
        struct ace_private *ap = netdev_priv(dev);
@@ -3199,3 +3186,5 @@ static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
               ap->name, offset);
        goto out;
 }
+
+module_pci_driver(acenic_pci_driver);
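
The deleted acenic_init()/acenic_exit() pair is subsumed by module_pci_driver(), a helper macro for PCI drivers whose module init and exit do nothing beyond registering and unregistering the pci_driver. To a first approximation it generates exactly the boilerplate it removes (simplified; the real macro goes through module_driver()):

	/* roughly what module_pci_driver(acenic_pci_driver) expands to */
	static int __init acenic_pci_driver_init(void)
	{
		return pci_register_driver(&acenic_pci_driver);
	}
	module_init(acenic_pci_driver_init);

	static void __exit acenic_pci_driver_exit(void)
	{
		pci_unregister_driver(&acenic_pci_driver);
	}
	module_exit(acenic_pci_driver_exit);
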
index 13d74aa4033dd44164dada90d0173efe526f9c16..562df46e0a82e24968b8b89bc35a7b357982e6ec 100644 (file)
@@ -34,7 +34,6 @@ config AMD8111_ETH
        tristate "AMD 8111 (new PCI LANCE) support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          If you have an AMD 8111-based PCI LANCE ethernet card,
@@ -60,7 +59,6 @@ config PCNET32
        tristate "AMD PCnet32 PCI support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
index 8e6b665a672686350f872565e4e071fad7c6caa9..1b1429d5d5c287f761a3066bc6bbebe6b74995ee 100644 (file)
@@ -1813,7 +1813,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
 static int amd8111e_probe_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
 {
-       int err,i,pm_cap;
+       int err, i;
        unsigned long reg_addr,reg_len;
        struct amd8111e_priv* lp;
        struct net_device* dev;
@@ -1842,7 +1842,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
        pci_set_master(pdev);
 
        /* Find power-management capability. */
-       if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
+       if (!pdev->pm_cap) {
                printk(KERN_ERR "amd8111e: No Power Management capability, "
                       "exiting.\n");
                err = -ENODEV;
@@ -1875,7 +1875,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
        lp = netdev_priv(dev);
        lp->pci_dev = pdev;
        lp->amd8111e_net_dev = dev;
-       lp->pm_cap = pm_cap;
+       lp->pm_cap = pdev->pm_cap;
 
        spin_lock_init(&lp->lock);
 
@@ -1981,15 +1981,4 @@ static struct pci_driver amd8111e_driver = {
        .resume         = amd8111e_resume
 };
 
-static int __init amd8111e_init(void)
-{
-       return pci_register_driver(&amd8111e_driver);
-}
-
-static void __exit amd8111e_cleanup(void)
-{
-       pci_unregister_driver(&amd8111e_driver);
-}
-
-module_init(amd8111e_init);
-module_exit(amd8111e_cleanup);
+module_pci_driver(amd8111e_driver);
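
Besides the same module_pci_driver() conversion, the amd8111e hunks drop the explicit pci_find_capability(pdev, PCI_CAP_ID_PM) lookup in favour of pdev->pm_cap, which the PCI core fills in while enumerating the device. A small sketch of the equivalent check, using a hypothetical function name:

	/* hypothetical helper: the cached pdev->pm_cap replaces the manual lookup */
	static int example_check_pm_cap(struct pci_dev *pdev,
					struct amd8111e_priv *lp)
	{
		if (!pdev->pm_cap)	/* no PM capability found at enumeration */
			return -ENODEV;

		lp->pm_cap = pdev->pm_cap;
		return 0;
	}
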
index 688aede742c7d0068d061f377582253f52280d12..ceb45bc963a93d8ba9cb2c554ff2c002ec6c916c 100644 (file)
@@ -1301,8 +1301,6 @@ static int au1000_remove(struct platform_device *pdev)
        int i;
        struct resource *base, *macen;
 
-       platform_set_drvdata(pdev, NULL);
-
        unregister_netdev(dev);
        mdiobus_unregister(aup->mii_bus);
        mdiobus_free(aup->mii_bus);
index f47b780892e97f4a4e5ca186e08ee1679f4eee6c..ece56831a647a9e1d93cbdcab7fed2842b6cd549 100644 (file)
@@ -1470,7 +1470,7 @@ no_link_test:
                goto fail;
        }
 
-       dev_set_drvdata(&op->dev, lp);
+       platform_set_drvdata(op, lp);
 
        printk(KERN_INFO "%s: LANCE %pM\n",
               dev->name, dev->dev_addr);
@@ -1501,7 +1501,7 @@ static int sunlance_sbus_probe(struct platform_device *op)
 
 static int sunlance_sbus_remove(struct platform_device *op)
 {
-       struct lance_private *lp = dev_get_drvdata(&op->dev);
+       struct lance_private *lp = platform_get_drvdata(op);
        struct net_device *net_dev = lp->dev;
 
        unregister_netdev(net_dev);
@@ -1510,8 +1510,6 @@ static int sunlance_sbus_remove(struct platform_device *op)
 
        free_netdev(net_dev);
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index f36bbd6d5085da5f5d2ba830b88c7e517e3f5f9e..a597b766f0809d3b1e1893e9ec008fcdc813405f 100644 (file)
@@ -1016,7 +1016,6 @@ static void bmac_set_multicast(struct net_device *dev)
 static void bmac_set_multicast(struct net_device *dev)
 {
        struct netdev_hw_addr *ha;
-       int i;
        unsigned short rx_cfg;
        u32 crc;
 
@@ -1030,14 +1029,12 @@ static void bmac_set_multicast(struct net_device *dev)
                rx_cfg |= RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);
        } else {
-               u16 hash_table[4];
+               u16 hash_table[4] = { 0 };
 
                rx_cfg = bmread(dev, RXCFG);
                rx_cfg &= ~RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);
 
-               for(i = 0; i < 4; i++) hash_table[i] = 0;
-
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
                        crc >>= 26;
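
The bmac change above leans on C's aggregate initialization rules: when an array is given a partial initializer list, the remaining elements are zero-initialized, so "u16 hash_table[4] = { 0 };" produces the same all-zero table as the deleted loop and makes the index variable unnecessary. A stand-alone check of that rule:

	#include <assert.h>

	int main(void)
	{
		unsigned short hash_table[4] = { 0 };	/* every element becomes 0 */
		int i;

		for (i = 0; i < 4; i++)
			assert(hash_table[i] == 0);
		return 0;
	}
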
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
new file mode 100644 (file)
index 0000000..514c57f
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# ARC EMAC network device configuration
+#
+
+config NET_VENDOR_ARC
+       bool "ARC devices"
+       default y
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y
+         and read the Ethernet-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about ARC cards. If you say Y, you will be asked for
+         your specific card in the following questions.
+
+if NET_VENDOR_ARC
+
+config ARC_EMAC
+       tristate "ARC EMAC support"
+       select MII
+       select PHYLIB
+       depends on OF_IRQ
+       depends on OF_NET
+       ---help---
+         Some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
+         use the non-standard on-chip ARC EMAC 10/100 Ethernet device.
+         Say Y here if you have such a board.  If unsure, say N.
+
+endif # NET_VENDOR_ARC
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
new file mode 100644 (file)
index 0000000..00c8657
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for the ARC network device drivers.
+#
+
+arc_emac-objs := emac_main.o emac_mdio.o
+obj-$(CONFIG_ARC_EMAC) += arc_emac.o
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
new file mode 100644 (file)
index 0000000..dc08678
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Registers and bits definitions of ARC EMAC
+ */
+
+#ifndef ARC_EMAC_H
+#define ARC_EMAC_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+/* STATUS and ENABLE Register bit masks */
+#define TXINT_MASK     (1<<0)  /* Transmit interrupt */
+#define RXINT_MASK     (1<<1)  /* Receive interrupt */
+#define ERR_MASK       (1<<2)  /* Error interrupt */
+#define TXCH_MASK      (1<<3)  /* Transmit chaining error interrupt */
+#define MSER_MASK      (1<<4)  /* Missed packet counter error */
+#define RXCR_MASK      (1<<8)  /* RXCRCERR counter rolled over  */
+#define RXFR_MASK      (1<<9)  /* RXFRAMEERR counter rolled over */
+#define RXFL_MASK      (1<<10) /* RXOFLOWERR counter rolled over */
+#define MDIO_MASK      (1<<12) /* MDIO complete interrupt */
+#define TXPL_MASK      (1<<31) /* Force polling of BD by EMAC */
+
+/* CONTROL Register bit masks */
+#define EN_MASK                (1<<0)  /* VMAC enable */
+#define TXRN_MASK      (1<<3)  /* TX enable */
+#define RXRN_MASK      (1<<4)  /* RX enable */
+#define DSBC_MASK      (1<<8)  /* Disable receive broadcast */
+#define ENFL_MASK      (1<<10) /* Enable Full-duplex */
+#define PROM_MASK      (1<<11) /* Promiscuous mode */
+
+/* Buffer descriptor INFO bit masks */
+#define OWN_MASK       (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */
+#define FIRST_MASK     (1<<16) /* First buffer in chain */
+#define LAST_MASK      (1<<17) /* Last buffer in chain */
+#define LEN_MASK       0x000007FF      /* last 11 bits */
+#define CRLS           (1<<21)
+#define DEFR           (1<<22)
+#define DROP           (1<<23)
+#define RTRY           (1<<24)
+#define LTCL           (1<<28)
+#define UFLO           (1<<29)
+
+#define FOR_EMAC       OWN_MASK
+#define FOR_CPU                0
+
+/* ARC EMAC register set combines entries for MAC and MDIO */
+enum {
+       R_ID = 0,
+       R_STATUS,
+       R_ENABLE,
+       R_CTRL,
+       R_POLLRATE,
+       R_RXERR,
+       R_MISS,
+       R_TX_RING,
+       R_RX_RING,
+       R_ADDRL,
+       R_ADDRH,
+       R_LAFL,
+       R_LAFH,
+       R_MDIO,
+};
+
+#define TX_TIMEOUT             (400*HZ/1000)   /* Transmission timeout */
+
+#define ARC_EMAC_NAPI_WEIGHT   40              /* Workload for NAPI */
+
+#define EMAC_BUFFER_SIZE       1536            /* EMAC buffer size */
+
+/**
+ * struct arc_emac_bd - EMAC buffer descriptor (BD).
+ *
+ * @info:      Contains status information on the buffer itself.
+ * @data:      32-bit byte addressable pointer to the packet data.
+ */
+struct arc_emac_bd {
+       __le32 info;
+       dma_addr_t data;
+};
+
+/* Number of Rx/Tx BD's */
+#define RX_BD_NUM      128
+#define TX_BD_NUM      128
+
+#define RX_RING_SZ     (RX_BD_NUM * sizeof(struct arc_emac_bd))
+#define TX_RING_SZ     (TX_BD_NUM * sizeof(struct arc_emac_bd))
+
+/**
+ * struct buffer_state - Stores Rx/Tx buffer state.
+ * @skb:       Pointer to socket buffer.
+ * @addr:      Start address of DMA-mapped memory region.
+ * @len:       Length of DMA-mapped memory region.
+ */
+struct buffer_state {
+       struct sk_buff *skb;
+       DEFINE_DMA_UNMAP_ADDR(addr);
+       DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/**
+ * struct arc_emac_priv - Storage of EMAC's private information.
+ * @dev:       Pointer to the current device.
+ * @ndev:      Pointer to the current network device.
+ * @phy_dev:   Pointer to attached PHY device.
+ * @bus:       Pointer to the current MII bus.
+ * @regs:      Base address of EMAC memory-mapped control registers.
+ * @napi:      Structure for NAPI.
+ * @stats:     Network device statistics.
+ * @rxbd:      Pointer to Rx BD ring.
+ * @txbd:      Pointer to Tx BD ring.
+ * @rxbd_dma:  DMA handle for Rx BD ring.
+ * @txbd_dma:  DMA handle for Tx BD ring.
+ * @rx_buff:   Storage for Rx buffers states.
+ * @tx_buff:   Storage for Tx buffers states.
+ * @txbd_curr: Index of Tx BD to use on the next "ndo_start_xmit".
+ * @txbd_dirty:        Index of Tx BD to free on the next Tx interrupt.
+ * @last_rx_bd:        Index of the last Rx BD we've got from EMAC.
+ * @link:      PHY's last seen link state.
+ * @duplex:    PHY's last set duplex mode.
+ * @speed:     PHY's last set speed.
+ * @max_speed: Maximum network data rate supported by the current system.
+ */
+struct arc_emac_priv {
+       /* Devices */
+       struct device *dev;
+       struct net_device *ndev;
+       struct phy_device *phy_dev;
+       struct mii_bus *bus;
+
+       void __iomem *regs;
+
+       struct napi_struct napi;
+       struct net_device_stats stats;
+
+       struct arc_emac_bd *rxbd;
+       struct arc_emac_bd *txbd;
+
+       dma_addr_t rxbd_dma;
+       dma_addr_t txbd_dma;
+
+       struct buffer_state rx_buff[RX_BD_NUM];
+       struct buffer_state tx_buff[TX_BD_NUM];
+       unsigned int txbd_curr;
+       unsigned int txbd_dirty;
+
+       unsigned int last_rx_bd;
+
+       unsigned int link;
+       unsigned int duplex;
+       unsigned int speed;
+       unsigned int max_speed;
+};
+
+/**
+ * arc_reg_set - Sets EMAC register with provided value.
+ * @priv:      Pointer to ARC EMAC private data structure.
+ * @reg:       Register offset from base address.
+ * @value:     Value to set in register.
+ */
+static inline void arc_reg_set(struct arc_emac_priv *priv, int reg, int value)
+{
+       iowrite32(value, priv->regs + reg * sizeof(int));
+}
+
+/**
+ * arc_reg_get - Gets value of specified EMAC register.
+ * @priv:      Pointer to ARC EMAC private data structure.
+ * @reg:       Register offset from base address.
+ *
+ * returns:    Value of requested register.
+ */
+static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg)
+{
+       return ioread32(priv->regs + reg * sizeof(int));
+}
+
+/**
+ * arc_reg_or - Applies mask to specified EMAC register - ("reg" | "mask").
+ * @priv:      Pointer to ARC EMAC private data structure.
+ * @reg:       Register offset from base address.
+ * @mask:      Mask to apply to specified register.
+ *
+ * This function reads initial register value, then applies provided mask
+ * to it and then writes register back.
+ */
+static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask)
+{
+       unsigned int value = arc_reg_get(priv, reg);
+       arc_reg_set(priv, reg, value | mask);
+}
+
+/**
+ * arc_reg_clr - Applies mask to specified EMAC register - ("reg" & ~"mask").
+ * @priv:      Pointer to ARC EMAC private data structure.
+ * @reg:       Register offset from base address.
+ * @mask:      Mask to apply to specified register.
+ *
+ * This function reads initial register value, then applies provided mask
+ * to it and then writes register back.
+ */
+static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask)
+{
+       unsigned int value = arc_reg_get(priv, reg);
+       arc_reg_set(priv, reg, value & ~mask);
+}
+
+int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv);
+int arc_mdio_remove(struct arc_emac_priv *priv);
+
+#endif /* ARC_EMAC_H */
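The arc_reg_* helpers above give the driver word-indexed, read-modify-write
access to the EMAC register file; a minimal usage sketch, mirroring calls
that appear later in emac_main.c:

    arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); /* unmask Rx/error irqs */
    arc_reg_or(priv, R_CTRL, EN_MASK);                  /* CTRL |= EN (enable EMAC) */
    arc_reg_clr(priv, R_ENABLE, RXINT_MASK);            /* mask the Rx irq again */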
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
new file mode 100644 (file)
index 0000000..f1b121e
--- /dev/null
@@ -0,0 +1,819 @@
+/*
+ * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the ARC EMAC 10100 (hardware revision 5)
+ *
+ * Contributors:
+ *             Amit Bhor
+ *             Sameer Dhavale
+ *             Vineet Gupta
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include "emac.h"
+
+#define DRV_NAME       "arc_emac"
+#define DRV_VERSION    "1.0"
+
+/**
+ * arc_emac_adjust_link - Adjust the PHY link duplex.
+ * @ndev:      Pointer to the net_device structure.
+ *
+ * This function is called to change the duplex setting after auto negotiation
+ * is done by the PHY.
+ */
+static void arc_emac_adjust_link(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct phy_device *phy_dev = priv->phy_dev;
+       unsigned int reg, state_changed = 0;
+
+       if (priv->link != phy_dev->link) {
+               priv->link = phy_dev->link;
+               state_changed = 1;
+       }
+
+       if (priv->speed != phy_dev->speed) {
+               priv->speed = phy_dev->speed;
+               state_changed = 1;
+       }
+
+       if (priv->duplex != phy_dev->duplex) {
+               reg = arc_reg_get(priv, R_CTRL);
+
+               if (DUPLEX_FULL == phy_dev->duplex)
+                       reg |= ENFL_MASK;
+               else
+                       reg &= ~ENFL_MASK;
+
+               arc_reg_set(priv, R_CTRL, reg);
+               priv->duplex = phy_dev->duplex;
+               state_changed = 1;
+       }
+
+       if (state_changed)
+               phy_print_status(phy_dev);
+}
+
+/**
+ * arc_emac_get_settings - Get PHY settings.
+ * @ndev:      Pointer to net_device structure.
+ * @cmd:       Pointer to ethtool_cmd structure.
+ *
+ * This implements ethtool command for getting PHY settings. If PHY could
+ * not be found, the function returns -ENODEV. This function calls the
+ * relevant PHY ethtool API to get the PHY settings.
+ * Issue "ethtool ethX" at the Linux prompt to execute this function.
+ */
+static int arc_emac_get_settings(struct net_device *ndev,
+                                struct ethtool_cmd *cmd)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+
+       return phy_ethtool_gset(priv->phy_dev, cmd);
+}
+
+/**
+ * arc_emac_set_settings - Set PHY settings as passed in the argument.
+ * @ndev:      Pointer to net_device structure.
+ * @cmd:       Pointer to ethtool_cmd structure.
+ *
+ * This implements ethtool command for setting various PHY settings. If PHY
+ * could not be found, the function returns -ENODEV. This function calls the
+ * relevant PHY ethtool API to set the PHY.
+ * Issue e.g. "ethtool -s ethX speed 1000" at the Linux prompt to execute
+ * this function.
+ */
+static int arc_emac_set_settings(struct net_device *ndev,
+                                struct ethtool_cmd *cmd)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       return phy_ethtool_sset(priv->phy_dev, cmd);
+}
+
+/**
+ * arc_emac_get_drvinfo - Get EMAC driver information.
+ * @ndev:      Pointer to net_device structure.
+ * @info:      Pointer to ethtool_drvinfo structure.
+ *
+ * This implements ethtool command for getting the driver information.
+ * Issue "ethtool -i ethX" at the Linux prompt to execute this function.
+ */
+static void arc_emac_get_drvinfo(struct net_device *ndev,
+                                struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops arc_emac_ethtool_ops = {
+       .get_settings   = arc_emac_get_settings,
+       .set_settings   = arc_emac_set_settings,
+       .get_drvinfo    = arc_emac_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+};
+
+#define FIRST_OR_LAST_MASK     (FIRST_MASK | LAST_MASK)
+
+/**
+ * arc_emac_tx_clean - clears Tx BDs already processed by the EMAC.
+ * @ndev:      Pointer to the network device.
+ */
+static void arc_emac_tx_clean(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &priv->stats;
+       unsigned int i;
+
+       for (i = 0; i < TX_BD_NUM; i++) {
+               unsigned int *txbd_dirty = &priv->txbd_dirty;
+               struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
+               struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
+               struct sk_buff *skb = tx_buff->skb;
+               unsigned int info = le32_to_cpu(txbd->info);
+
+               *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
+
+               if ((info & FOR_EMAC) || !txbd->data)
+                       break;
+
+               if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
+                       stats->tx_errors++;
+                       stats->tx_dropped++;
+
+                       if (info & DEFR)
+                               stats->tx_carrier_errors++;
+
+                       if (info & LTCL)
+                               stats->collisions++;
+
+                       if (info & UFLO)
+                               stats->tx_fifo_errors++;
+               } else if (likely(info & FIRST_OR_LAST_MASK)) {
+                       stats->tx_packets++;
+                       stats->tx_bytes += skb->len;
+               }
+
+               dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+                                dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
+
+               /* return the sk_buff to system */
+               dev_kfree_skb_irq(skb);
+
+               txbd->data = 0;
+               txbd->info = 0;
+
+               if (netif_queue_stopped(ndev))
+                       netif_wake_queue(ndev);
+       }
+}
+
+/**
+ * arc_emac_rx - processing of Rx packets.
+ * @ndev:      Pointer to the network device.
+ * @budget:    How many BDs to process on 1 call.
+ *
+ * returns:    Number of processed BDs
+ *
+ * Iterate through Rx BDs and deliver received packets to the upper layer.
+ */
+static int arc_emac_rx(struct net_device *ndev, int budget)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       unsigned int work_done;
+
+       for (work_done = 0; work_done < budget; work_done++) {
+               unsigned int *last_rx_bd = &priv->last_rx_bd;
+               struct net_device_stats *stats = &priv->stats;
+               struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
+               struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
+               unsigned int pktlen, info = le32_to_cpu(rxbd->info);
+               struct sk_buff *skb;
+               dma_addr_t addr;
+
+               if (unlikely((info & OWN_MASK) == FOR_EMAC))
+                       break;
+
+               /* Make a note that we saw a packet at this BD.
+                * So next time, driver starts from this + 1
+                */
+               *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
+
+               if (unlikely((info & FIRST_OR_LAST_MASK) !=
+                            FIRST_OR_LAST_MASK)) {
+                       /* We pre-allocate buffers of MTU size so incoming
+                        * packets won't be split/chained.
+                        */
+                       if (net_ratelimit())
+                               netdev_err(ndev, "incomplete packet received\n");
+
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+                       stats->rx_errors++;
+                       stats->rx_length_errors++;
+                       continue;
+               }
+
+               pktlen = info & LEN_MASK;
+               stats->rx_packets++;
+               stats->rx_bytes += pktlen;
+               skb = rx_buff->skb;
+               skb_put(skb, pktlen);
+               skb->dev = ndev;
+               skb->protocol = eth_type_trans(skb, ndev);
+
+               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+               /* Prepare the BD for next cycle */
+               rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
+                                                        EMAC_BUFFER_SIZE);
+               if (unlikely(!rx_buff->skb)) {
+                       stats->rx_errors++;
+                       /* Packet is not passed to netif_receive_skb(), so count it as dropped */
+                       stats->rx_dropped++;
+                       continue;
+               }
+
+               /* receive_skb only if new skb was allocated to avoid holes */
+               netif_receive_skb(skb);
+
+               addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+                                     EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, addr)) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "cannot dma map\n");
+                       dev_kfree_skb(rx_buff->skb);
+                       stats->rx_errors++;
+                       continue;
+               }
+               dma_unmap_addr_set(rx_buff, addr, addr);
+               dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
+
+               rxbd->data = cpu_to_le32(addr);
+
+               /* Make sure pointer to data buffer is set */
+               wmb();
+
+               /* Return ownership to EMAC */
+               rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+       }
+
+       return work_done;
+}
+
+/**
+ * arc_emac_poll - NAPI poll handler.
+ * @napi:      Pointer to napi_struct structure.
+ * @budget:    How many BDs to process on 1 call.
+ *
+ * returns:    Number of processed BDs
+ */
+static int arc_emac_poll(struct napi_struct *napi, int budget)
+{
+       struct net_device *ndev = napi->dev;
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       unsigned int work_done;
+
+       arc_emac_tx_clean(ndev);
+
+       work_done = arc_emac_rx(ndev, budget);
+       if (work_done < budget) {
+               napi_complete(napi);
+               arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+       }
+
+       return work_done;
+}
+
+/**
+ * arc_emac_intr - Global interrupt handler for EMAC.
+ * @irq:               irq number.
+ * @dev_instance:      device instance.
+ *
+ * returns: IRQ_HANDLED for all cases.
+ *
+ * ARC EMAC has only one interrupt line; the bits raised in the STATUS
+ * register tell us why the interrupt fired.
+ */
+static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
+{
+       struct net_device *ndev = dev_instance;
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &priv->stats;
+       unsigned int status;
+
+       status = arc_reg_get(priv, R_STATUS);
+       status &= ~MDIO_MASK;
+
+       /* Reset all flags except "MDIO complete" */
+       arc_reg_set(priv, R_STATUS, status);
+
+       if (status & RXINT_MASK) {
+               if (likely(napi_schedule_prep(&priv->napi))) {
+                       arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       if (status & ERR_MASK) {
+               /* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
+                * 8-bit error counter overrun.
+                */
+
+               if (status & MSER_MASK) {
+                       stats->rx_missed_errors += 0x100;
+                       stats->rx_errors += 0x100;
+               }
+
+               if (status & RXCR_MASK) {
+                       stats->rx_crc_errors += 0x100;
+                       stats->rx_errors += 0x100;
+               }
+
+               if (status & RXFR_MASK) {
+                       stats->rx_frame_errors += 0x100;
+                       stats->rx_errors += 0x100;
+               }
+
+               if (status & RXFL_MASK) {
+                       stats->rx_over_errors += 0x100;
+                       stats->rx_errors += 0x100;
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * arc_emac_open - Open the network device.
+ * @ndev:      Pointer to the network device.
+ *
+ * returns: 0, on success or non-zero error value on failure.
+ *
+ * This function sets up the Rx BD ring, clears the Tx BD ring, enables
+ * EMAC interrupts, starts PHY auto-negotiation and starts the Tx queue.
+ */
+static int arc_emac_open(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct phy_device *phy_dev = priv->phy_dev;
+       int i;
+
+       phy_dev->autoneg = AUTONEG_ENABLE;
+       phy_dev->speed = 0;
+       phy_dev->duplex = 0;
+       phy_dev->advertising = phy_dev->supported;
+
+       if (priv->max_speed > 100) {
+               phy_dev->advertising &= PHY_GBIT_FEATURES;
+       } else if (priv->max_speed <= 100) {
+               phy_dev->advertising &= PHY_BASIC_FEATURES;
+               if (priv->max_speed <= 10) {
+                       phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
+                       phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
+               }
+       }
+
+       priv->last_rx_bd = 0;
+
+       /* Allocate and set buffers for Rx BD's */
+       for (i = 0; i < RX_BD_NUM; i++) {
+               dma_addr_t addr;
+               unsigned int *last_rx_bd = &priv->last_rx_bd;
+               struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
+               struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
+
+               rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
+                                                        EMAC_BUFFER_SIZE);
+               if (unlikely(!rx_buff->skb))
+                       return -ENOMEM;
+
+               addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+                                     EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, addr)) {
+                       netdev_err(ndev, "cannot dma map\n");
+                       dev_kfree_skb(rx_buff->skb);
+                       return -ENOMEM;
+               }
+               dma_unmap_addr_set(rx_buff, addr, addr);
+               dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
+
+               rxbd->data = cpu_to_le32(addr);
+
+               /* Make sure pointer to data buffer is set */
+               wmb();
+
+               /* Return ownership to EMAC */
+               rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+
+               *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
+       }
+
+       /* Clean Tx BD's */
+       memset(priv->txbd, 0, TX_RING_SZ);
+
+       /* Initialize logical address filter */
+       arc_reg_set(priv, R_LAFL, 0);
+       arc_reg_set(priv, R_LAFH, 0);
+
+       /* Set BD ring pointers for device side */
+       arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
+       arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
+
+       /* Enable interrupts */
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+
+       /* Set CONTROL */
+       arc_reg_set(priv, R_CTRL,
+                    (RX_BD_NUM << 24) |        /* RX BD table length */
+                    (TX_BD_NUM << 16) |        /* TX BD table length */
+                    TXRN_MASK | RXRN_MASK);
+
+       napi_enable(&priv->napi);
+
+       /* Enable EMAC */
+       arc_reg_or(priv, R_CTRL, EN_MASK);
+
+       phy_start_aneg(priv->phy_dev);
+
+       netif_start_queue(ndev);
+
+       return 0;
+}
+
+/**
+ * arc_emac_stop - Close the network device.
+ * @ndev:      Pointer to the network device.
+ *
+ * This function disables NAPI, stops the Tx queue, disables EMAC
+ * interrupts and disables the EMAC itself.
+ */
+static int arc_emac_stop(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+
+       napi_disable(&priv->napi);
+       netif_stop_queue(ndev);
+
+       /* Disable interrupts */
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+
+       /* Disable EMAC */
+       arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+       return 0;
+}
+
+/**
+ * arc_emac_stats - Get system network statistics.
+ * @ndev:      Pointer to net_device structure.
+ *
+ * Returns the address of the device statistics structure.
+ * Statistics are updated in the interrupt handler.
+ */
+static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &priv->stats;
+       unsigned long miss, rxerr;
+       u8 rxcrc, rxfram, rxoflow;
+
+       rxerr = arc_reg_get(priv, R_RXERR);
+       miss = arc_reg_get(priv, R_MISS);
+
+       rxcrc = rxerr;
+       rxfram = rxerr >> 8;
+       rxoflow = rxerr >> 16;
+
+       stats->rx_errors += miss;
+       stats->rx_errors += rxcrc + rxfram + rxoflow;
+
+       stats->rx_over_errors += rxoflow;
+       stats->rx_frame_errors += rxfram;
+       stats->rx_crc_errors += rxcrc;
+       stats->rx_missed_errors += miss;
+
+       return stats;
+}
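To make the unpacking above concrete: R_RXERR holds three 8-bit hardware
counters packed into one word, which the u8 truncation and shifts separate.
With an illustrative value:

    rxerr   = 0x00030201;
    rxcrc   = rxerr;        /* low byte    -> 0x01 CRC errors      */
    rxfram  = rxerr >> 8;   /* second byte -> 0x02 framing errors  */
    rxoflow = rxerr >> 16;  /* third byte  -> 0x03 overflow errors */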
+
+/**
+ * arc_emac_tx - Starts the data transmission.
+ * @skb:       sk_buff pointer that contains data to be transmitted.
+ * @ndev:      Pointer to net_device structure.
+ *
+ * returns: NETDEV_TX_OK on success,
+ *             NETDEV_TX_BUSY if any of the descriptors are not free.
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ */
+static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       unsigned int len, *txbd_curr = &priv->txbd_curr;
+       struct net_device_stats *stats = &priv->stats;
+       __le32 *info = &priv->txbd[*txbd_curr].info;
+       dma_addr_t addr;
+
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
+       len = max_t(unsigned int, ETH_ZLEN, skb->len);
+
+       /* EMAC still holds this buffer in its possession.
+        * CPU must not modify this buffer descriptor
+        */
+       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
+                             DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+               stats->tx_dropped++;
+               stats->tx_errors++;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+       dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
+       dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
+
+       priv->tx_buff[*txbd_curr].skb = skb;
+       priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
+
+       /* Make sure pointer to data buffer is set */
+       wmb();
+
+       *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
+
+       /* Increment index to point to the next BD */
+       *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
+
+       /* Get "info" of the next BD */
+       info = &priv->txbd[*txbd_curr].info;
+
+       /* Check if Tx BD ring is full - next BD is still owned by EMAC */
+       if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+               netif_stop_queue(ndev);
+
+       arc_reg_set(priv, R_STATUS, TXPL_MASK);
+
+       skb_tx_timestamp(skb);
+
+       return NETDEV_TX_OK;
+}
+
+/**
+ * arc_emac_set_address - Set the MAC address for this device.
+ * @ndev:      Pointer to net_device structure.
+ * @p:         6-byte address to be written as the MAC address.
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * returns:    -EBUSY if the net device is busy or 0 if the address is set
+ *             successfully.
+ */
+static int arc_emac_set_address(struct net_device *ndev, void *p)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct sockaddr *addr = p;
+       unsigned int addr_low, addr_hi;
+
+       if (netif_running(ndev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+       addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+       addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+       arc_reg_set(priv, R_ADDRL, addr_low);
+       arc_reg_set(priv, R_ADDRH, addr_hi);
+
+       return 0;
+}
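The two register writes above pack the six MAC-address bytes little-endian
into ADDRL/ADDRH. For a made-up address 00:11:22:33:44:55:

    /* dev_addr[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } */
    addr_low = 0x33221100;  /* bytes 0..3, little-endian */
    addr_hi  = 0x5544;      /* bytes 4..5, little-endian */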
+
+static const struct net_device_ops arc_emac_netdev_ops = {
+       .ndo_open               = arc_emac_open,
+       .ndo_stop               = arc_emac_stop,
+       .ndo_start_xmit         = arc_emac_tx,
+       .ndo_set_mac_address    = arc_emac_set_address,
+       .ndo_get_stats          = arc_emac_stats,
+};
+
+static int arc_emac_probe(struct platform_device *pdev)
+{
+       struct resource res_regs, res_irq;
+       struct device_node *phy_node;
+       struct arc_emac_priv *priv;
+       struct net_device *ndev;
+       const char *mac_addr;
+       unsigned int id, clock_frequency;
+       int err;
+
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
+       /* Get PHY from device tree */
+       phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0);
+       if (!phy_node) {
+               dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n");
+               return -ENODEV;
+       }
+
+       /* Get EMAC registers base address from device tree */
+       err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs);
+       if (err) {
+               dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n");
+               return -ENODEV;
+       }
+
+       /* Get CPU clock frequency from device tree */
+       if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+                                &clock_frequency)) {
+               dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
+               return -EINVAL;
+       }
+
+       /* Get IRQ from device tree */
+       err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq);
+       if (!err) {
+               dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
+               return -ENODEV;
+       }
+
+       ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
+       if (!ndev)
+               return -ENOMEM;
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       ndev->netdev_ops = &arc_emac_netdev_ops;
+       ndev->ethtool_ops = &arc_emac_ethtool_ops;
+       ndev->watchdog_timeo = TX_TIMEOUT;
+       /* FIXME :: no multicast support yet */
+       ndev->flags &= ~IFF_MULTICAST;
+
+       priv = netdev_priv(ndev);
+       priv->dev = &pdev->dev;
+       priv->ndev = ndev;
+
+       priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
+       if (IS_ERR(priv->regs)) {
+               err = PTR_ERR(priv->regs);
+               goto out;
+       }
+       dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
+
+       id = arc_reg_get(priv, R_ID);
+
+       /* Check for EMAC revision 5 or 7, magic number */
+       if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
+               dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
+               err = -ENODEV;
+               goto out;
+       }
+       dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
+
+       /* Set poll rate so that it polls every 1 ms */
+       arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
+
+       /* Get max speed of operation from device tree */
+       if (of_property_read_u32(pdev->dev.of_node, "max-speed",
+                                &priv->max_speed)) {
+               dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       ndev->irq = res_irq.start;
+       dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
+
+       /* Register interrupt handler for device */
+       err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0,
+                              ndev->name, ndev);
+       if (err) {
+               dev_err(&pdev->dev, "could not allocate IRQ\n");
+               goto out;
+       }
+
+       /* Get MAC address from device tree */
+       mac_addr = of_get_mac_address(pdev->dev.of_node);
+
+       if (!mac_addr || !is_valid_ether_addr(mac_addr))
+               eth_hw_addr_random(ndev);
+       else
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+
+       dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
+
+       /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
+       priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ,
+                                        &priv->rxbd_dma, GFP_KERNEL);
+
+       if (!priv->rxbd) {
+               dev_err(&pdev->dev, "failed to allocate data buffers\n");
+               err = -ENOMEM;
+               goto out;
+       }
+
+       priv->txbd = priv->rxbd + RX_BD_NUM;
+
+       priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
+       dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
+               (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);
+
+       err = arc_mdio_probe(pdev, priv);
+       if (err) {
+               dev_err(&pdev->dev, "failed to probe MII bus\n");
+               goto out;
+       }
+
+       priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
+                                      PHY_INTERFACE_MODE_MII);
+       if (!priv->phy_dev) {
+               dev_err(&pdev->dev, "of_phy_connect() failed\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+       dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
+                priv->phy_dev->drv->name, priv->phy_dev->phy_id);
+
+       netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
+
+       err = register_netdev(ndev);
+       if (err) {
+               netif_napi_del(&priv->napi);
+               dev_err(&pdev->dev, "failed to register network device\n");
+               goto out;
+       }
+
+       return 0;
+
+out:
+       free_netdev(ndev);
+       return err;
+}
+
+static int arc_emac_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+
+       phy_disconnect(priv->phy_dev);
+       priv->phy_dev = NULL;
+       arc_mdio_remove(priv);
+       unregister_netdev(ndev);
+       netif_napi_del(&priv->napi);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+static const struct of_device_id arc_emac_dt_ids[] = {
+       { .compatible = "snps,arc-emac" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, arc_emac_dt_ids);
+
+static struct platform_driver arc_emac_driver = {
+       .probe = arc_emac_probe,
+       .remove = arc_emac_remove,
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table  = arc_emac_dt_ids,
+               },
+};
+
+module_platform_driver(arc_emac_driver);
+
+MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
+MODULE_DESCRIPTION("ARC EMAC driver");
+MODULE_LICENSE("GPL");
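A hypothetical device-tree node matching what arc_emac_probe() looks up
(compatible string, register window, interrupt, clock-frequency, max-speed
and a "phy" phandle); all values below are placeholders, not taken from any
real board:

    ethernet@c0fc2000 {
            compatible = "snps,arc-emac";
            reg = <0xc0fc2000 0x3c>;
            interrupts = <6>;
            clock-frequency = <80000000>;
            max-speed = <100>;
            phy = <&phy0>;
    };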
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
new file mode 100644 (file)
index 0000000..26ba242
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * MDIO implementation for ARC EMAC
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+
+#include "emac.h"
+
+/* Number of seconds we wait for "MDIO complete" flag to appear */
+#define ARC_MDIO_COMPLETE_POLL_COUNT   1
+
+/**
+ * arc_mdio_complete_wait - Waits until MDIO transaction is completed.
+ * @priv:      Pointer to ARC EMAC private data structure.
+ *
+ * returns:    0 on success, -ETIMEDOUT on a timeout.
+ */
+static int arc_mdio_complete_wait(struct arc_emac_priv *priv)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARC_MDIO_COMPLETE_POLL_COUNT * 40; i++) {
+               unsigned int status = arc_reg_get(priv, R_STATUS);
+
+               status &= MDIO_MASK;
+
+               if (status) {
+                       /* Reset "MDIO complete" flag */
+                       arc_reg_set(priv, R_STATUS, status);
+                       return 0;
+               }
+
+               msleep(25);
+       }
+
+       return -ETIMEDOUT;
+}
+
+/**
+ * arc_mdio_read - MDIO interface read function.
+ * @bus:       Pointer to MII bus structure.
+ * @phy_addr:  Address of the PHY device.
+ * @reg_num:   PHY register to read.
+ *
+ * returns:    The register contents on success, -ETIMEDOUT on a timeout.
+ *
+ * Reads the contents of the requested register from the requested PHY
+ * address.
+ */
+static int arc_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
+{
+       struct arc_emac_priv *priv = bus->priv;
+       unsigned int value;
+       int error;
+
+       arc_reg_set(priv, R_MDIO,
+                   0x60020000 | (phy_addr << 23) | (reg_num << 18));
+
+       error = arc_mdio_complete_wait(priv);
+       if (error < 0)
+               return error;
+
+       value = arc_reg_get(priv, R_MDIO) & 0xffff;
+
+       dev_dbg(priv->dev, "arc_mdio_read(phy_addr=%i, reg_num=%x) = %x\n",
+               phy_addr, reg_num, value);
+
+       return value;
+}
+
+/**
+ * arc_mdio_write - MDIO interface write function.
+ * @bus:       Pointer to MII bus structure.
+ * @phy_addr:  Address of the PHY device.
+ * @reg_num:   PHY register to write to.
+ * @value:     Value to be written into the register.
+ *
+ * returns:    0 on success, -ETIMEDOUT on a timeout.
+ *
+ * Writes the value to the requested register.
+ */
+static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
+                         int reg_num, u16 value)
+{
+       struct arc_emac_priv *priv = bus->priv;
+
+       dev_dbg(priv->dev,
+               "arc_mdio_write(phy_addr=%i, reg_num=%x, value=%x)\n",
+               phy_addr, reg_num, value);
+
+       arc_reg_set(priv, R_MDIO,
+                    0x50020000 | (phy_addr << 23) | (reg_num << 18) | value);
+
+       return arc_mdio_complete_wait(priv);
+}
+
+/**
+ * arc_mdio_probe - MDIO probe function.
+ * @pdev:      Pointer to platform device.
+ * @priv:      Pointer to ARC EMAC private data structure.
+ *
+ * returns:    0 on success, -ENOMEM when mdiobus_alloc() (which allocates
+ * memory for the MII bus structure) fails.
+ *
+ * Sets up and registers the MDIO interface.
+ */
+int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
+{
+       struct mii_bus *bus;
+       int error;
+
+       bus = mdiobus_alloc();
+       if (!bus)
+               return -ENOMEM;
+
+       priv->bus = bus;
+       bus->priv = priv;
+       bus->parent = priv->dev;
+       bus->name = "Synopsys MII Bus";
+       bus->read = &arc_mdio_read;
+       bus->write = &arc_mdio_write;
+
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+       error = of_mdiobus_register(bus, pdev->dev.of_node);
+       if (error) {
+               dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
+               mdiobus_free(bus);
+               return error;
+       }
+
+       return 0;
+}
+
+/**
+ * arc_mdio_remove - MDIO remove function.
+ * @priv:      Pointer to ARC EMAC private data structure.
+ *
+ * Unregisters the MDIO bus and frees any memory associated with the MII bus.
+ */
+int arc_mdio_remove(struct arc_emac_priv *priv)
+{
+       mdiobus_unregister(priv->bus);
+       mdiobus_free(priv->bus);
+       priv->bus = NULL;
+
+       return 0;
+}
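Once registered via of_mdiobus_register(), these two callbacks back the
generic mdiobus/phylib accessors. A hedged sketch of how a PHY register read
would reach arc_mdio_read() (phy_addr stands for whatever address the PHY
node declares in the device tree; MII_BMSR comes from <linux/mii.h>):

    int bmsr = mdiobus_read(priv->bus, phy_addr, MII_BMSR); /* -> arc_mdio_read() */
    int val  = phy_read(priv->phy_dev, MII_BMSR);           /* same path, via the attached phy_device */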
index 36d6abd1cfff5a147c6ce79f18f2dccd9cac16cd..39e8d6cc8843de8bdb189dfd81331bc296f0b80c 100644 (file)
@@ -22,7 +22,6 @@ config ATL2
        tristate "Atheros L2 Fast Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros L2 fast ethernet adapter.
@@ -34,7 +33,6 @@ config ATL1
        tristate "Atheros/Attansic L1 Gigabit Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros/Attansic L1 gigabit ethernet
@@ -47,7 +45,6 @@ config ATL1E
        tristate "Atheros L1E Gigabit Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros L1E gigabit ethernet adapter.
@@ -59,7 +56,6 @@ config ATL1C
        tristate "Atheros L1C Gigabit Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros L1C gigabit ethernet adapter.
@@ -67,4 +63,22 @@ config ATL1C
          To compile this driver as a module, choose M here.  The module
          will be called atl1c.
 
+config ALX
+       tristate "Qualcomm Atheros AR816x/AR817x support"
+       depends on PCI
+       select CRC32
+       select NET_CORE
+       select MDIO
+       help
+         This driver supports the Qualcomm Atheros L1F ethernet adapter,
+         i.e. the following chipsets:
+
+         1969:1091 - AR8161 Gigabit Ethernet
+         1969:1090 - AR8162 Fast Ethernet
+         1969:10A1 - AR8171 Gigabit Ethernet
+         1969:10A0 - AR8172 Fast Ethernet
+
+         To compile this driver as a module, choose M here.  The module
+         will be called alx.
+
 endif # NET_VENDOR_ATHEROS
index e7e76fb576ff5c9db197936935ba5c20a0c9aa2a..5cf1c65bbce9877ca243a115aff1c2ee57257b4c 100644 (file)
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644 (file)
index 0000000..5901fa4
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644 (file)
index 0000000..50b3ae2
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include "hw.h"
+
+#define ALX_WATCHDOG_TIME   (5 * HZ)
+
+struct alx_buffer {
+       struct sk_buff *skb;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(size);
+};
+
+struct alx_rx_queue {
+       struct alx_rrd *rrd;
+       dma_addr_t rrd_dma;
+
+       struct alx_rfd *rfd;
+       dma_addr_t rfd_dma;
+
+       struct alx_buffer *bufs;
+
+       u16 write_idx, read_idx;
+       u16 rrd_read_idx;
+};
+#define ALX_RX_ALLOC_THRESH    32
+
+struct alx_tx_queue {
+       struct alx_txd *tpd;
+       dma_addr_t tpd_dma;
+       struct alx_buffer *bufs;
+       u16 write_idx, read_idx;
+};
+
+#define ALX_DEFAULT_TX_WORK 128
+
+enum alx_device_quirks {
+       ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
+};
+
+struct alx_priv {
+       struct net_device *dev;
+
+       struct alx_hw hw;
+
+       /* all descriptor memory */
+       struct {
+               dma_addr_t dma;
+               void *virt;
+               int size;
+       } descmem;
+
+       /* protect int_mask updates */
+       spinlock_t irq_lock;
+       u32 int_mask;
+
+       int tx_ringsz;
+       int rx_ringsz;
+       int rxbuf_size;
+
+       struct napi_struct napi;
+       struct alx_tx_queue txq;
+       struct alx_rx_queue rxq;
+
+       struct work_struct link_check_wk;
+       struct work_struct reset_wk;
+
+       u16 msg_enable;
+
+       bool msi;
+};
+
+extern const struct ethtool_ops alx_ethtool_ops;
+extern const char alx_drv_name[];
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644 (file)
index 0000000..6fa2aec
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#include "alx.h"
+#include "reg.h"
+#include "hw.h"
+
+
+static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       ecmd->supported = SUPPORTED_10baseT_Half |
+                         SUPPORTED_10baseT_Full |
+                         SUPPORTED_100baseT_Half |
+                         SUPPORTED_100baseT_Full |
+                         SUPPORTED_Autoneg |
+                         SUPPORTED_TP |
+                         SUPPORTED_Pause;
+       if (alx_hw_giga(hw))
+               ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+       ecmd->advertising = ADVERTISED_TP;
+       if (hw->adv_cfg & ADVERTISED_Autoneg)
+               ecmd->advertising |= hw->adv_cfg;
+
+       ecmd->port = PORT_TP;
+       ecmd->phy_address = 0;
+       if (hw->adv_cfg & ADVERTISED_Autoneg)
+               ecmd->autoneg = AUTONEG_ENABLE;
+       else
+               ecmd->autoneg = AUTONEG_DISABLE;
+       ecmd->transceiver = XCVR_INTERNAL;
+
+       if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
+               if (hw->flowctrl & ALX_FC_RX) {
+                       ecmd->advertising |= ADVERTISED_Pause;
+
+                       if (!(hw->flowctrl & ALX_FC_TX))
+                               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               } else if (hw->flowctrl & ALX_FC_TX) {
+                       ecmd->advertising |= ADVERTISED_Asym_Pause;
+               }
+       }
+
+       if (hw->link_speed != SPEED_UNKNOWN) {
+               ethtool_cmd_speed_set(ecmd,
+                                     hw->link_speed - hw->link_speed % 10);
+               ecmd->duplex = hw->link_speed % 10;
+       } else {
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->duplex = DUPLEX_UNKNOWN;
+       }
+
+       return 0;
+}
+
+static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       u32 adv_cfg;
+
+       ASSERT_RTNL();
+
+       if (ecmd->autoneg == AUTONEG_ENABLE) {
+               if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+                       return -EINVAL;
+               adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+       } else {
+               int speed = ethtool_cmd_speed(ecmd);
+
+               switch (speed + ecmd->duplex) {
+               case SPEED_10 + DUPLEX_HALF:
+                       adv_cfg = ADVERTISED_10baseT_Half;
+                       break;
+               case SPEED_10 + DUPLEX_FULL:
+                       adv_cfg = ADVERTISED_10baseT_Full;
+                       break;
+               case SPEED_100 + DUPLEX_HALF:
+                       adv_cfg = ADVERTISED_100baseT_Half;
+                       break;
+               case SPEED_100 + DUPLEX_FULL:
+                       adv_cfg = ADVERTISED_100baseT_Full;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       hw->adv_cfg = adv_cfg;
+       return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+}
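A note on the encoding used here and in alx_get_settings() above: with the
kernel's DUPLEX_HALF == 0 and DUPLEX_FULL == 1, the driver folds speed and
duplex into a single integer, so for example:

    hw->link_speed = SPEED_100 + DUPLEX_FULL;   /* 100 + 1 == 101        */
    speed  = 101 - 101 % 10;                    /* == 100 (SPEED_100)    */
    duplex = 101 % 10;                          /* == 1   (DUPLEX_FULL)  */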
+
+static void alx_get_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       if (hw->flowctrl & ALX_FC_ANEG &&
+           hw->adv_cfg & ADVERTISED_Autoneg)
+               pause->autoneg = AUTONEG_ENABLE;
+       else
+               pause->autoneg = AUTONEG_DISABLE;
+
+       if (hw->flowctrl & ALX_FC_TX)
+               pause->tx_pause = 1;
+       else
+               pause->tx_pause = 0;
+
+       if (hw->flowctrl & ALX_FC_RX)
+               pause->rx_pause = 1;
+       else
+               pause->rx_pause = 0;
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+                             struct ethtool_pauseparam *pause)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       int err = 0;
+       bool reconfig_phy = false;
+       u8 fc = 0;
+
+       if (pause->tx_pause)
+               fc |= ALX_FC_TX;
+       if (pause->rx_pause)
+               fc |= ALX_FC_RX;
+       if (pause->autoneg)
+               fc |= ALX_FC_ANEG;
+
+       ASSERT_RTNL();
+
+       /* restart auto-neg for auto-mode */
+       if (hw->adv_cfg & ADVERTISED_Autoneg) {
+               if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
+                       reconfig_phy = true;
+               if (fc & hw->flowctrl & ALX_FC_ANEG &&
+                   (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+                       reconfig_phy = true;
+       }
+
+       if (reconfig_phy) {
+               err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
+               return err;
+       }
+
+       /* flow control on mac */
+       if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+               alx_cfg_mac_flowcontrol(hw, fc);
+
+       hw->flowctrl = fc;
+
+       return 0;
+}
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       return alx->msg_enable;
+}
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       alx->msg_enable = data;
+}
+
+static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       wol->supported = WAKE_MAGIC | WAKE_PHY;
+       wol->wolopts = 0;
+
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+               wol->wolopts |= WAKE_MAGIC;
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
+               wol->wolopts |= WAKE_PHY;
+}
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+                           WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+               return -EOPNOTSUPP;
+
+       hw->sleep_ctrl = 0;
+
+       if (wol->wolopts & WAKE_MAGIC)
+               hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
+       if (wol->wolopts & WAKE_PHY)
+               hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
+
+       device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
+
+       return 0;
+}
+
+static void alx_get_drvinfo(struct net_device *netdev,
+                           struct ethtool_drvinfo *drvinfo)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+const struct ethtool_ops alx_ethtool_ops = {
+       .get_settings   = alx_get_settings,
+       .set_settings   = alx_set_settings,
+       .get_pauseparam = alx_get_pauseparam,
+       .set_pauseparam = alx_set_pauseparam,
+       .get_drvinfo    = alx_get_drvinfo,
+       .get_msglevel   = alx_get_msglevel,
+       .set_msglevel   = alx_set_msglevel,
+       .get_wol        = alx_get_wol,
+       .set_wol        = alx_set_wol,
+       .get_link       = ethtool_op_get_link,
+};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644 (file)
index 0000000..220a16a
--- /dev/null
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+#include "reg.h"
+#include "hw.h"
+
+static inline bool alx_is_rev_a(u8 rev)
+{
+       return rev == ALX_REV_A0 || rev == ALX_REV_A1;
+}
+
+static int alx_wait_mdio_idle(struct alx_hw *hw)
+{
+       u32 val;
+       int i;
+
+       for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
+               val = alx_read_mem32(hw, ALX_MDIO);
+               if (!(val & ALX_MDIO_BUSY))
+                       return 0;
+               udelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+                            u16 reg, u16 *phy_data)
+{
+       u32 val, clk_sel;
+       int err;
+
+       *phy_data = 0;
+
+       /* use the slow clock while the PHY is in hibernation (no link) */
+       clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+                       ALX_MDIO_CLK_SEL_25MD4 :
+                       ALX_MDIO_CLK_SEL_25MD128;
+
+       if (ext) {
+               val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+                     reg << ALX_MDIO_EXTN_REG_SHIFT;
+               alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+               val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
+                     ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
+       } else {
+               val = ALX_MDIO_SPRES_PRMBL |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+                     reg << ALX_MDIO_REG_SHIFT |
+                     ALX_MDIO_START | ALX_MDIO_OP_READ;
+       }
+       alx_write_mem32(hw, ALX_MDIO, val);
+
+       err = alx_wait_mdio_idle(hw);
+       if (err)
+               return err;
+       val = alx_read_mem32(hw, ALX_MDIO);
+       *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
+       return 0;
+}
+
+static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+                             u16 reg, u16 phy_data)
+{
+       u32 val, clk_sel;
+
+       /* use the slow clock while the PHY is in hibernation (no link) */
+       clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+                       ALX_MDIO_CLK_SEL_25MD4 :
+                       ALX_MDIO_CLK_SEL_25MD128;
+
+       if (ext) {
+               val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+                     reg << ALX_MDIO_EXTN_REG_SHIFT;
+               alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+               val = ALX_MDIO_SPRES_PRMBL |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+                     phy_data << ALX_MDIO_DATA_SHIFT |
+                     ALX_MDIO_START | ALX_MDIO_MODE_EXT;
+       } else {
+               val = ALX_MDIO_SPRES_PRMBL |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+                     reg << ALX_MDIO_REG_SHIFT |
+                     phy_data << ALX_MDIO_DATA_SHIFT |
+                     ALX_MDIO_START;
+       }
+       alx_write_mem32(hw, ALX_MDIO, val);
+
+       return alx_wait_mdio_idle(hw);
+}
+
+static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+       return alx_read_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+       return alx_write_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+       return alx_read_phy_core(hw, true, dev, reg, pdata);
+}
+
+static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+       return alx_write_phy_core(hw, true, dev, reg, data);
+}
+
+static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+       int err;
+
+       err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+       if (err)
+               return err;
+
+       return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
+}
+
+static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+       int err;
+
+       err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+       if (err)
+               return err;
+
+       return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
+}
+
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_read_phy_reg(hw, reg, phy_data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_write_phy_reg(hw, reg, phy_data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_read_phy_ext(hw, dev, reg, pdata);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_write_phy_ext(hw, dev, reg, data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_read_phy_dbg(hw, reg, pdata);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_write_phy_dbg(hw, reg, data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+static u16 alx_get_phy_config(struct alx_hw *hw)
+{
+       u32 val;
+       u16 phy_val;
+
+       val = alx_read_mem32(hw, ALX_PHY_CTRL);
+       /* phy in reset */
+       if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
+               return ALX_DRV_PHY_UNKNOWN;
+
+       val = alx_read_mem32(hw, ALX_DRV);
+       val = ALX_GET_FIELD(val, ALX_DRV_PHY);
+       if (ALX_DRV_PHY_UNKNOWN == val)
+               return ALX_DRV_PHY_UNKNOWN;
+
+       alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
+       if (ALX_PHY_INITED == phy_val)
+               return val;
+
+       return ALX_DRV_PHY_UNKNOWN;
+}
+
+static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
+{
+       u32 read;
+       int i;
+
+       for (i = 0; i < ALX_SLD_MAX_TO; i++) {
+               read = alx_read_mem32(hw, reg);
+               if ((read & wait) == 0) {
+                       if (val)
+                               *val = read;
+                       return true;
+               }
+               mdelay(1);
+       }
+
+       return false;
+}
+
+static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
+{
+       u32 mac0, mac1;
+
+       mac0 = alx_read_mem32(hw, ALX_STAD0);
+       mac1 = alx_read_mem32(hw, ALX_STAD1);
+
+       /* addr should be big-endian */
+       *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
+       *(__be16 *)addr = cpu_to_be16(mac1);
+
+       return is_valid_ether_addr(addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+       u32 val;
+
+       /* try to get it from register first */
+       if (alx_read_macaddr(hw, addr))
+               return 0;
+
+       /* try to load from efuse */
+       if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
+               return -EIO;
+       alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
+       if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
+               return -EIO;
+       if (alx_read_macaddr(hw, addr))
+               return 0;
+
+       /* try to load from flash/eeprom (if present) */
+       val = alx_read_mem32(hw, ALX_EFLD);
+       if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
+               if (!alx_wait_reg(hw, ALX_EFLD,
+                                 ALX_EFLD_STAT | ALX_EFLD_START, &val))
+                       return -EIO;
+               alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
+               if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
+                       return -EIO;
+               if (alx_read_macaddr(hw, addr))
+                       return 0;
+       }
+
+       return -EIO;
+}
+
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
+{
+       u32 val;
+
+       /* for example: 00-0B-6A-F6-00-DC -> STAD0=6AF600DC, STAD1=000B */
+       val = be32_to_cpu(*(__be32 *)(addr + 2));
+       alx_write_mem32(hw, ALX_STAD0, val);
+       val = be16_to_cpu(*(__be16 *)addr);
+       alx_write_mem32(hw, ALX_STAD1, val);
+}
+
+static void alx_enable_osc(struct alx_hw *hw)
+{
+       u32 val;
+
+       /* generate a rising edge on INTNLOSC_OPEN to enable the internal OSC */
+       val = alx_read_mem32(hw, ALX_MISC);
+       alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
+       alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+}
+
+static void alx_reset_osc(struct alx_hw *hw, u8 rev)
+{
+       u32 val, val2;
+
+       /* clear internal OSC settings; let the hardware switch the OSC itself */
+       val = alx_read_mem32(hw, ALX_MISC3);
+       alx_write_mem32(hw, ALX_MISC3,
+                       (val & ~ALX_MISC3_25M_BY_SW) |
+                       ALX_MISC3_25M_NOTO_INTNL);
+
+       /* the 25M clock from the chipset may be unstable for 1s after
+        * PERST is de-asserted; the driver needs to re-calibrate it before
+        * entering sleep for WoL
+        */
+       val = alx_read_mem32(hw, ALX_MISC);
+       if (rev >= ALX_REV_B0) {
+               /* restore the over-current protection default value,
+                * which may have been cleared by a MAC reset
+                */
+               ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
+               /* a 0->1 transition updates the internal OSC value */
+               val &= ~ALX_MISC_INTNLOSC_OPEN;
+               alx_write_mem32(hw, ALX_MISC, val);
+               alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+               /* hw automatically disables the OSC after calibration */
+               val2 = alx_read_mem32(hw, ALX_MSIC2);
+               val2 &= ~ALX_MSIC2_CALB_START;
+               alx_write_mem32(hw, ALX_MSIC2, val2);
+               alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
+       } else {
+               val &= ~ALX_MISC_INTNLOSC_OPEN;
+               /* disable isolate for rev A devices */
+               if (alx_is_rev_a(rev))
+                       val &= ~ALX_MISC_ISO_EN;
+
+               alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+               alx_write_mem32(hw, ALX_MISC, val);
+       }
+
+       udelay(20);
+}
+
+static int alx_stop_mac(struct alx_hw *hw)
+{
+       u32 rxq, txq, val;
+       u16 i;
+
+       rxq = alx_read_mem32(hw, ALX_RXQ0);
+       alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
+       txq = alx_read_mem32(hw, ALX_TXQ0);
+       alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
+
+       udelay(40);
+
+       hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+       for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+               val = alx_read_mem32(hw, ALX_MAC_STS);
+               if (!(val & ALX_MAC_STS_IDLE))
+                       return 0;
+               udelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
+int alx_reset_mac(struct alx_hw *hw)
+{
+       u32 val, pmctrl;
+       int i, ret;
+       u8 rev;
+       bool a_cr;
+
+       pmctrl = 0;
+       rev = alx_hw_revision(hw);
+       a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
+
+       /* disable all interrupts, RXQ/TXQ */
+       alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
+       alx_write_mem32(hw, ALX_IMR, 0);
+       alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+
+       ret = alx_stop_mac(hw);
+       if (ret)
+               return ret;
+
+       /* MAC reset workaround */
+       alx_write_mem32(hw, ALX_RFD_PIDX, 1);
+
+       /* disable L0s/L1 before MAC reset */
+       if (a_cr) {
+               pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+               if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+                       alx_write_mem32(hw, ALX_PMCTRL,
+                                       pmctrl & ~(ALX_PMCTRL_L1_EN |
+                                                  ALX_PMCTRL_L0S_EN));
+       }
+
+       /* reset whole mac safely */
+       val = alx_read_mem32(hw, ALX_MASTER);
+       alx_write_mem32(hw, ALX_MASTER,
+                       val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
+
+       /* make sure the MAC is really idle */
+       udelay(10);
+       for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+               val = alx_read_mem32(hw, ALX_RFD_PIDX);
+               if (val == 0)
+                       break;
+               udelay(10);
+       }
+       for (; i < ALX_DMA_MAC_RST_TO; i++) {
+               val = alx_read_mem32(hw, ALX_MASTER);
+               if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
+                       break;
+               udelay(10);
+       }
+       if (i == ALX_DMA_MAC_RST_TO)
+               return -EIO;
+       udelay(10);
+
+       if (a_cr) {
+               alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
+               /* restore l0s / l1 */
+               if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+                       alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+       }
+
+       alx_reset_osc(hw, rev);
+
+       /* clear internal OSC settings, let the hardware switch the OSC
+        * itself, and disable isolation for rev A devices
+        */
+       val = alx_read_mem32(hw, ALX_MISC3);
+       alx_write_mem32(hw, ALX_MISC3,
+                       (val & ~ALX_MISC3_25M_BY_SW) |
+                       ALX_MISC3_25M_NOTO_INTNL);
+       val = alx_read_mem32(hw, ALX_MISC);
+       val &= ~ALX_MISC_INTNLOSC_OPEN;
+       if (alx_is_rev_a(rev))
+               val &= ~ALX_MISC_ISO_EN;
+       alx_write_mem32(hw, ALX_MISC, val);
+       udelay(20);
+
+       /* driver control speed/duplex, hash-alg */
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+       val = alx_read_mem32(hw, ALX_SERDES);
+       alx_write_mem32(hw, ALX_SERDES,
+                       val | ALX_SERDES_MACCLK_SLWDWN |
+                       ALX_SERDES_PHYCLK_SLWDWN);
+
+       return 0;
+}
+
+void alx_reset_phy(struct alx_hw *hw)
+{
+       int i;
+       u32 val;
+       u16 phy_val;
+
+       /* (DSP)reset PHY core */
+       val = alx_read_mem32(hw, ALX_PHY_CTRL);
+       val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
+                ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
+                ALX_PHY_CTRL_CLS);
+       val |= ALX_PHY_CTRL_RST_ANALOG;
+
+       val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
+       alx_write_mem32(hw, ALX_PHY_CTRL, val);
+       udelay(10);
+       alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
+
+       for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
+               udelay(10);
+
+       /* phy power saving & hib */
+       alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
+                         ALX_SYSMODCTRL_IECHOADJ_DEF);
+       alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
+                         ALX_VDRVBIAS_DEF);
+
+       /* EEE advertisement */
+       val = alx_read_mem32(hw, ALX_LPI_CTRL);
+       alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
+       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
+
+       /* phy power saving */
+       alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
+       alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+                         phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
+       /* rtl8139c, 120m issue */
+       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
+                         ALX_MIIEXT_NLP78_120M_DEF);
+       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
+                         ALX_MIIEXT_S3DIG10_DEF);
+
+       if (hw->lnk_patch) {
+               /* Turn off half amplitude */
+               alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+                                &phy_val);
+               alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+                                 phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
+               /* Turn off Green feature */
+               alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+               alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+                                 phy_val | ALX_GREENCFG2_BP_GREEN);
+               /* Turn off half Bias */
+               alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+                                &phy_val);
+               alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+                                 phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
+       }
+
+       /* set phy interrupt mask */
+       alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
+}
+
+#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+void alx_reset_pcie(struct alx_hw *hw)
+{
+       u8 rev = alx_hw_revision(hw);
+       u32 val;
+       u16 val16;
+
+       /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+       pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
+       if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
+               val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
+               pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
+       }
+
+       /* clear WoL setting/status */
+       val = alx_read_mem32(hw, ALX_WOL0);
+       alx_write_mem32(hw, ALX_WOL0, 0);
+
+       val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+       alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
+
+       /* mask some pcie error bits */
+       val = alx_read_mem32(hw, ALX_UE_SVRT);
+       val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
+       alx_write_mem32(hw, ALX_UE_SVRT, val);
+
+       /* wol 25M & pclk */
+       val = alx_read_mem32(hw, ALX_MASTER);
+       if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
+               if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+                   (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
+                       alx_write_mem32(hw, ALX_MASTER,
+                                       val | ALX_MASTER_PCLKSEL_SRDS |
+                                       ALX_MASTER_WAKEN_25M);
+       } else {
+               if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+                   (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
+                       alx_write_mem32(hw, ALX_MASTER,
+                                       (val & ~ALX_MASTER_PCLKSEL_SRDS) |
+                                       ALX_MASTER_WAKEN_25M);
+       }
+
+       /* ASPM setting */
+       alx_enable_aspm(hw, true, true);
+
+       udelay(10);
+}
+
+void alx_start_mac(struct alx_hw *hw)
+{
+       u32 mac, txq, rxq;
+
+       rxq = alx_read_mem32(hw, ALX_RXQ0);
+       alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
+       txq = alx_read_mem32(hw, ALX_TXQ0);
+       alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
+
+       mac = hw->rx_ctrl;
+       if (hw->link_speed % 10 == DUPLEX_FULL)
+               mac |= ALX_MAC_CTRL_FULLD;
+       else
+               mac &= ~ALX_MAC_CTRL_FULLD;
+       ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+                     hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+                                                    ALX_MAC_CTRL_SPEED_10_100);
+       mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
+       hw->rx_ctrl = mac;
+       alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+}
+
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
+{
+       if (fc & ALX_FC_RX)
+               hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
+       else
+               hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
+
+       if (fc & ALX_FC_TX)
+               hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
+       else
+               hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
+
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+       u32 pmctrl;
+       u8 rev = alx_hw_revision(hw);
+
+       pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+
+       ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
+                     ALX_PMCTRL_LCKDET_TIMER_DEF);
+       pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
+                 ALX_PMCTRL_L1_CLKSW_EN |
+                 ALX_PMCTRL_L1_SRDSRX_PWD;
+       ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
+       ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
+       pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
+                   ALX_PMCTRL_L1_SRDSPLL_EN |
+                   ALX_PMCTRL_L1_BUFSRX_EN |
+                   ALX_PMCTRL_SADLY_EN |
+                   ALX_PMCTRL_HOTRST_WTEN|
+                   ALX_PMCTRL_L0S_EN |
+                   ALX_PMCTRL_L1_EN |
+                   ALX_PMCTRL_ASPM_FCEN |
+                   ALX_PMCTRL_TXL1_AFTER_L0S |
+                   ALX_PMCTRL_RXL1_AFTER_L0S);
+       if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
+               pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
+
+       if (l0s_en)
+               pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
+       if (l1_en)
+               pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
+
+       alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+}
+
+
+static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
+{
+       u32 cfg = 0;
+
+       if (ethadv_cfg & ADVERTISED_Autoneg) {
+               cfg |= ALX_DRV_PHY_AUTO;
+               if (ethadv_cfg & ADVERTISED_10baseT_Half)
+                       cfg |= ALX_DRV_PHY_10;
+               if (ethadv_cfg & ADVERTISED_10baseT_Full)
+                       cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+               if (ethadv_cfg & ADVERTISED_100baseT_Half)
+                       cfg |= ALX_DRV_PHY_100;
+               if (ethadv_cfg & ADVERTISED_100baseT_Full)
+                       cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+               if (ethadv_cfg & ADVERTISED_1000baseT_Half)
+                       cfg |= ALX_DRV_PHY_1000;
+               if (ethadv_cfg & ADVERTISED_1000baseT_Full)
+                       cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
+               if (ethadv_cfg & ADVERTISED_Pause)
+                       cfg |= ADVERTISE_PAUSE_CAP;
+               if (ethadv_cfg & ADVERTISED_Asym_Pause)
+                       cfg |= ADVERTISE_PAUSE_ASYM;
+       } else {
+               switch (ethadv_cfg) {
+               case ADVERTISED_10baseT_Half:
+                       cfg |= ALX_DRV_PHY_10;
+                       break;
+               case ADVERTISED_100baseT_Half:
+                       cfg |= ALX_DRV_PHY_100;
+                       break;
+               case ADVERTISED_10baseT_Full:
+                       cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+                       break;
+               case ADVERTISED_100baseT_Full:
+                       cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+                       break;
+               }
+       }
+
+       return cfg;
+}
+
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
+{
+       u16 adv, giga, cr;
+       u32 val;
+       int err = 0;
+
+       alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
+       val = alx_read_mem32(hw, ALX_DRV);
+       ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
+
+       if (ethadv & ADVERTISED_Autoneg) {
+               adv = ADVERTISE_CSMA;
+               adv |= ethtool_adv_to_mii_adv_t(ethadv);
+
+               if (flowctrl & ALX_FC_ANEG) {
+                       if (flowctrl & ALX_FC_RX) {
+                               adv |= ADVERTISE_PAUSE_CAP;
+                               if (!(flowctrl & ALX_FC_TX))
+                                       adv |= ADVERTISE_PAUSE_ASYM;
+                       } else if (flowctrl & ALX_FC_TX) {
+                               adv |= ADVERTISE_PAUSE_ASYM;
+                       }
+               }
+               giga = 0;
+               if (alx_hw_giga(hw))
+                       giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
+
+               cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+               if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
+                   alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
+                   alx_write_phy_reg(hw, MII_BMCR, cr))
+                       err = -EBUSY;
+       } else {
+               cr = BMCR_RESET;
+               if (ethadv == ADVERTISED_100baseT_Half ||
+                   ethadv == ADVERTISED_100baseT_Full)
+                       cr |= BMCR_SPEED100;
+               if (ethadv == ADVERTISED_10baseT_Full ||
+                   ethadv == ADVERTISED_100baseT_Full)
+                       cr |= BMCR_FULLDPLX;
+
+               err = alx_write_phy_reg(hw, MII_BMCR, cr);
+       }
+
+       if (!err) {
+               alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
+               val |= ethadv_to_hw_cfg(hw, ethadv);
+       }
+
+       alx_write_mem32(hw, ALX_DRV, val);
+
+       return err;
+}
+
+
+void alx_post_phy_link(struct alx_hw *hw)
+{
+       u16 phy_val, len, agc;
+       u8 revid = alx_hw_revision(hw);
+       bool adj_th = revid == ALX_REV_B0;
+       int speed;
+
+       if (hw->link_speed == SPEED_UNKNOWN)
+               speed = SPEED_UNKNOWN;
+       else
+               speed = hw->link_speed - hw->link_speed % 10;
+
+       if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
+               return;
+
+       /* 1000BT/AZ, wrong cable length */
+       if (speed != SPEED_UNKNOWN) {
+               alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
+                                &phy_val);
+               len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
+               alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
+               agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
+
+               if ((speed == SPEED_1000 &&
+                    (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
+                     (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
+                   (speed == SPEED_100 &&
+                    (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
+                     (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+                                         ALX_AZ_ANADECT_LONG);
+                       alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                        &phy_val);
+                       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                         phy_val | ALX_AFE_10BT_100M_TH);
+               } else {
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+                                         ALX_AZ_ANADECT_DEF);
+                       alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
+                                        ALX_MIIEXT_AFE, &phy_val);
+                       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                         phy_val & ~ALX_AFE_10BT_100M_TH);
+               }
+
+               /* threshold adjust */
+               if (adj_th && hw->lnk_patch) {
+                       if (speed == SPEED_100) {
+                               alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+                                                 ALX_MSE16DB_UP);
+                       } else if (speed == SPEED_1000) {
+                               /*
+                                * Gigabit link threshold: raise the noise
+                                * tolerance by 50%
+                                */
+                               alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+                                                &phy_val);
+                               ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+                                             ALX_MSE20DB_TH_HI);
+                               alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+                                                 phy_val);
+                       }
+               }
+       } else {
+               alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                &phy_val);
+               alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                 phy_val & ~ALX_AFE_10BT_100M_TH);
+
+               if (adj_th && hw->lnk_patch) {
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+                                         ALX_MSE16DB_DOWN);
+                       alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
+                       ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+                                     ALX_MSE20DB_TH_DEF);
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
+               }
+       }
+}
+
+
+/* NOTE:
+ *    1. the PHY link must be established before calling this function
+ *    2. the WoL options (pattern, magic, link, etc.) must be configured
+ *       before calling it
+ */
+int alx_pre_suspend(struct alx_hw *hw, int speed)
+{
+       u32 master, mac, phy, val;
+       int err = 0;
+
+       master = alx_read_mem32(hw, ALX_MASTER);
+       master &= ~ALX_MASTER_PCLKSEL_SRDS;
+       mac = hw->rx_ctrl;
+       /* 10/100 half */
+       ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,  ALX_MAC_CTRL_SPEED_10_100);
+       mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+
+       phy = alx_read_mem32(hw, ALX_PHY_CTRL);
+       phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
+       phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
+              ALX_PHY_CTRL_HIB_EN;
+
+       /* no wake-up activity is configured */
+       if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
+               err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+               if (err)
+                       return err;
+               phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
+       } else {
+               if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
+                       mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
+               if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
+                       mac |= ALX_MAC_CTRL_TX_EN;
+               if (speed % 10 == DUPLEX_FULL)
+                       mac |= ALX_MAC_CTRL_FULLD;
+               if (speed >= SPEED_1000)
+                       ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+                                     ALX_MAC_CTRL_SPEED_1000);
+               phy |= ALX_PHY_CTRL_DSPRST_OUT;
+               err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
+                                       ALX_MIIEXT_S3DIG10,
+                                       ALX_MIIEXT_S3DIG10_SL);
+               if (err)
+                       return err;
+       }
+
+       alx_enable_osc(hw);
+       hw->rx_ctrl = mac;
+       alx_write_mem32(hw, ALX_MASTER, master);
+       alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+       alx_write_mem32(hw, ALX_PHY_CTRL, phy);
+
+       /* set val of PDLL D3PLLOFF */
+       val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+       val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
+       alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
+
+       return 0;
+}
+
+bool alx_phy_configured(struct alx_hw *hw)
+{
+       u32 cfg, hw_cfg;
+
+       cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
+       cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
+       hw_cfg = alx_get_phy_config(hw);
+
+       if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
+               return false;
+
+       return cfg == hw_cfg;
+}
+
+int alx_get_phy_link(struct alx_hw *hw, int *speed)
+{
+       struct pci_dev *pdev = hw->pdev;
+       u16 bmsr, giga;
+       int err;
+
+       err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+       if (err)
+               return err;
+
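+       /* BMSR latches link-down events; read it twice so the second read
+        * reflects the current link state
+        */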
+       err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+       if (err)
+               return err;
+
+       if (!(bmsr & BMSR_LSTATUS)) {
+               *speed = SPEED_UNKNOWN;
+               return 0;
+       }
+
+       /* speed/duplex result is saved in PHY Specific Status Register */
+       err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
+       if (err)
+               return err;
+
+       if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
+               goto wrong_speed;
+
+       switch (giga & ALX_GIGA_PSSR_SPEED) {
+       case ALX_GIGA_PSSR_1000MBS:
+               *speed = SPEED_1000;
+               break;
+       case ALX_GIGA_PSSR_100MBS:
+               *speed = SPEED_100;
+               break;
+       case ALX_GIGA_PSSR_10MBS:
+               *speed = SPEED_10;
+               break;
+       default:
+               goto wrong_speed;
+       }
+
+       *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+       return 1;
+
+wrong_speed:
+       dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
+       return -EINVAL;
+}
+
+int alx_clear_phy_intr(struct alx_hw *hw)
+{
+       u16 isr;
+
+       /* clear interrupt status by reading it */
+       return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
+}
+
+int alx_config_wol(struct alx_hw *hw)
+{
+       u32 wol = 0;
+       int err = 0;
+
+       /* turn on magic packet event */
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+               wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
+
+       /* turn on link up event */
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
+               wol |=  ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
+               /* only a link-up event can wake the system */
+               err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
+       }
+       alx_write_mem32(hw, ALX_WOL0, wol);
+
+       return err;
+}
+
+void alx_disable_rss(struct alx_hw *hw)
+{
+       u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
+
+       ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
+       alx_write_mem32(hw, ALX_RXQ0, ctrl);
+}
+
+void alx_configure_basic(struct alx_hw *hw)
+{
+       u32 val, raw_mtu, max_payload;
+       u16 val16;
+       u8 chip_rev = alx_hw_revision(hw);
+
+       alx_set_macaddr(hw, hw->mac_addr);
+
+       alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
+
+       /* idle timeout before switching to the 125M clock */
+       if (chip_rev >= ALX_REV_B0)
+               alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
+                               ALX_IDLE_DECISN_TIMER_DEF);
+
+       alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
+
+       val = alx_read_mem32(hw, ALX_MASTER);
+       val |= ALX_MASTER_IRQMOD2_EN |
+              ALX_MASTER_IRQMOD1_EN |
+              ALX_MASTER_SYSALVTIMER_EN;
+       alx_write_mem32(hw, ALX_MASTER, val);
+       alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
+                       (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
+       /* interrupt re-trigger timeout */
+       alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
+       /* TPD threshold to trigger an interrupt */
+       alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
+       alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
+
+       raw_mtu = hw->mtu + ETH_HLEN;
+       alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
+       if (raw_mtu > ALX_MTU_JUMBO_TH)
+               hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
+
+       if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
+               val = (raw_mtu + 8 + 7) >> 3;
+       else
+               val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
+       alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
+
+       max_payload = pcie_get_readrq(hw->pdev) >> 8;
+       /*
+        * if the BIOS has changed the default DMA max read request length,
+        * restore it to the default value
+        */
+       if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
+               pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
+
+       val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
+             ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
+             ALX_TXQ0_SUPT_IPOPT |
+             ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
+       alx_write_mem32(hw, ALX_TXQ0, val);
+       val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
+             ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
+             ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
+             ALX_HQTPD_BURST_EN;
+       alx_write_mem32(hw, ALX_HQTPD, val);
+
+       /* rxq, flow control */
+       val = alx_read_mem32(hw, ALX_SRAM5);
+       val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
+       if (val > ALX_SRAM_RXF_LEN_8K) {
+               val16 = ALX_MTU_STD_ALGN >> 3;
+               val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
+       } else {
+               val16 = ALX_MTU_STD_ALGN >> 3;
+               val = (val - ALX_MTU_STD_ALGN) >> 3;
+       }
+       alx_write_mem32(hw, ALX_RXQ2,
+                       val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
+                       val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
+       val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
+             ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
+             ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
+             ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
+             ALX_RXQ0_IPV6_PARSE_EN;
+
+       if (alx_hw_giga(hw))
+               ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
+                             ALX_RXQ0_ASPM_THRESH_100M);
+
+       alx_write_mem32(hw, ALX_RXQ0, val);
+
+       val = alx_read_mem32(hw, ALX_DMA);
+       val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
+             ALX_DMA_RREQ_PRI_DATA |
+             max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
+             ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
+             ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
+             (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
+       alx_write_mem32(hw, ALX_DMA, val);
+
+       /* default multi-tx-q weights */
+       val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
+             4 << ALX_WRR_PRI0_SHIFT |
+             4 << ALX_WRR_PRI1_SHIFT |
+             4 << ALX_WRR_PRI2_SHIFT |
+             4 << ALX_WRR_PRI3_SHIFT;
+       alx_write_mem32(hw, ALX_WRR, val);
+}
+
+static inline u32 alx_speed_to_ethadv(int speed)
+{
+       switch (speed) {
+       case SPEED_1000 + DUPLEX_FULL:
+               return ADVERTISED_1000baseT_Full;
+       case SPEED_100 + DUPLEX_FULL:
+               return ADVERTISED_100baseT_Full;
+       case SPEED_100 + DUPLEX_HALF:
+               return ADVERTISED_100baseT_Half;
+       case SPEED_10 + DUPLEX_FULL:
+               return ADVERTISED_10baseT_Full;
+       case SPEED_10 + DUPLEX_HALF:
+               return ADVERTISED_10baseT_Half;
+       default:
+               return 0;
+       }
+}
+
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
+{
+       int i, err, spd;
+       u16 lpa;
+
+       err = alx_get_phy_link(hw, &spd);
+       if (err < 0)
+               return err;
+
+       if (spd == SPEED_UNKNOWN)
+               return 0;
+
+       err = alx_read_phy_reg(hw, MII_LPA, &lpa);
+       if (err)
+               return err;
+
+       if (!(lpa & LPA_LPACK)) {
+               *speed = spd;
+               return 0;
+       }
+
+       if (lpa & LPA_10FULL)
+               *speed = SPEED_10 + DUPLEX_FULL;
+       else if (lpa & LPA_10HALF)
+               *speed = SPEED_10 + DUPLEX_HALF;
+       else if (lpa & LPA_100FULL)
+               *speed = SPEED_100 + DUPLEX_FULL;
+       else
+               *speed = SPEED_100 + DUPLEX_HALF;
+
+       if (*speed != spd) {
+               err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+               if (err)
+                       return err;
+               err = alx_setup_speed_duplex(hw,
+                                            alx_speed_to_ethadv(*speed) |
+                                            ADVERTISED_Autoneg,
+                                            ALX_FC_ANEG | ALX_FC_RX |
+                                            ALX_FC_TX);
+               if (err)
+                       return err;
+
+               /* wait for linkup */
+               for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+                       int speed2;
+
+                       msleep(100);
+
+                       err = alx_get_phy_link(hw, &speed2);
+                       if (err < 0)
+                               return err;
+                       if (speed2 != SPEED_UNKNOWN)
+                               break;
+               }
+               if (i == ALX_MAX_SETUP_LNK_CYCLE)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+bool alx_get_phy_info(struct alx_hw *hw)
+{
+       u16  devs1, devs2;
+
+       if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
+           alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
+               return false;
+
+       /* since there is no PMA/PMD status2 register, we can't use
+        * mdio45_probe() to detect prtad and mmds;
+        * use the fixed MMD3 to get the mmds instead
+        */
+       if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
+           alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
+               return false;
+       hw->mdio.mmds = devs1 | devs2 << 16;
+
+       return true;
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644 (file)
index 0000000..65e723d
--- /dev/null
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_HW_H_
+#define ALX_HW_H_
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include "reg.h"
+
+/* Transmit Packet Descriptor, contains 4 32-bit words.
+ *
+ *   31               16               0
+ *   +----------------+----------------+
+ *   |    vlan-tag    |   buf length   |
+ *   +----------------+----------------+
+ *   |              Word 1             |
+ *   +----------------+----------------+
+ *   |      Word 2: buf addr lo        |
+ *   +----------------+----------------+
+ *   |      Word 3: buf addr hi        |
+ *   +----------------+----------------+
+ *
+ * Word 2 and 3 combine to form a 64-bit buffer address
+ *
+ * Word 1 has three forms, depending on the state of bits 8/12/13:
+ * if bit8 == '1', the definition is only for custom checksum offload.
+ * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
+ *     of the skb is special for LSO V2: Word 2 becomes the total skb length
+ *     and Word 3 is meaningless.
+ * otherwise, the definition is for a general skb, or for ip/tcp/udp
+ *     checksum or LSO (TSO) offload.
+ *
+ * Here is the depiction:
+ *
+ *   0-+                                  0-+
+ *   1 |                                  1 |
+ *   2 |                                  2 |
+ *   3 |    Payload offset                3 |    L4 header offset
+ *   4 |        (7:0)                     4 |        (7:0)
+ *   5 |                                  5 |
+ *   6 |                                  6 |
+ *   7-+                                  7-+
+ *   8      Custom csum enable = 1        8      Custom csum enable = 0
+ *   9      General IPv4 checksum         9      General IPv4 checksum
+ *   10     General TCP checksum          10     General TCP checksum
+ *   11     General UDP checksum          11     General UDP checksum
+ *   12     Large Send Segment enable     12     Large Send Segment enable
+ *   13     Large Send Segment type       13     Large Send Segment type
+ *   14     VLAN tagged                   14     VLAN tagged
+ *   15     Insert VLAN tag               15     Insert VLAN tag
+ *   16     IPv4 packet                   16     IPv4 packet
+ *   17     Ethernet frame type           17     Ethernet frame type
+ *   18-+                                 18-+
+ *   19 |                                 19 |
+ *   20 |                                 20 |
+ *   21 |   Custom csum offset            21 |
+ *   22 |       (25:18)                   22 |
+ *   23 |                                 23 |   MSS (30:18)
+ *   24 |                                 24 |
+ *   25-+                                 25 |
+ *   26-+                                 26 |
+ *   27 |                                 27 |
+ *   28 |   Reserved                      28 |
+ *   29 |                                 29 |
+ *   30-+                                 30-+
+ *   31     End of packet                 31     End of packet
+ */
+struct alx_txd {
+       __le16 len;
+       __le16 vlan_tag;
+       __le32 word1;
+       union {
+               __le64 addr;
+               struct {
+                       __le32 pkt_len;
+                       __le32 resvd;
+               } l;
+       } adrl;
+} __packed;
+
+/* tpd word 1 */
+#define TPD_CXSUMSTART_MASK            0x00FF
+#define TPD_CXSUMSTART_SHIFT           0
+#define TPD_L4HDROFFSET_MASK           0x00FF
+#define TPD_L4HDROFFSET_SHIFT          0
+#define TPD_CXSUM_EN_MASK              0x0001
+#define TPD_CXSUM_EN_SHIFT             8
+#define TPD_IP_XSUM_MASK               0x0001
+#define TPD_IP_XSUM_SHIFT              9
+#define TPD_TCP_XSUM_MASK              0x0001
+#define TPD_TCP_XSUM_SHIFT             10
+#define TPD_UDP_XSUM_MASK              0x0001
+#define TPD_UDP_XSUM_SHIFT             11
+#define TPD_LSO_EN_MASK                        0x0001
+#define TPD_LSO_EN_SHIFT               12
+#define TPD_LSO_V2_MASK                        0x0001
+#define TPD_LSO_V2_SHIFT               13
+#define TPD_VLTAGGED_MASK              0x0001
+#define TPD_VLTAGGED_SHIFT             14
+#define TPD_INS_VLTAG_MASK             0x0001
+#define TPD_INS_VLTAG_SHIFT            15
+#define TPD_IPV4_MASK                  0x0001
+#define TPD_IPV4_SHIFT                 16
+#define TPD_ETHTYPE_MASK               0x0001
+#define TPD_ETHTYPE_SHIFT              17
+#define TPD_CXSUMOFFSET_MASK           0x00FF
+#define TPD_CXSUMOFFSET_SHIFT          18
+#define TPD_MSS_MASK                   0x1FFF
+#define TPD_MSS_SHIFT                  18
+#define TPD_EOP_MASK                   0x0001
+#define TPD_EOP_SHIFT                  31
+
+#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
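+/* _name keeps its trailing underscore, e.g. DESC_GET(word1, TPD_MSS_)
+ * extracts the MSS field from a (CPU-endian) TPD word 1 value
+ */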
+
+/* Receive Free Descriptor */
+struct alx_rfd {
+       __le64 addr;            /* data buffer address, length is
+                                * declared in register --- every
+                                * buffer has the same size
+                                */
+} __packed;
+
+/* Receive Return Descriptor, contains 4 32-bit words.
+ *
+ *   31               16               0
+ *   +----------------+----------------+
+ *   |              Word 0             |
+ *   +----------------+----------------+
+ *   |     Word 1: RSS Hash value      |
+ *   +----------------+----------------+
+ *   |              Word 2             |
+ *   +----------------+----------------+
+ *   |              Word 3             |
+ *   +----------------+----------------+
+ *
+ * Word 0 depiction         &            Word 2 depiction:
+ *
+ *   0--+                                 0--+
+ *   1  |                                 1  |
+ *   2  |                                 2  |
+ *   3  |                                 3  |
+ *   4  |                                 4  |
+ *   5  |                                 5  |
+ *   6  |                                 6  |
+ *   7  |    IP payload checksum          7  |     VLAN tag
+ *   8  |         (15:0)                  8  |      (15:0)
+ *   9  |                                 9  |
+ *   10 |                                 10 |
+ *   11 |                                 11 |
+ *   12 |                                 12 |
+ *   13 |                                 13 |
+ *   14 |                                 14 |
+ *   15-+                                 15-+
+ *   16-+                                 16-+
+ *   17 |     Number of RFDs              17 |
+ *   18 |        (19:16)                  18 |
+ *   19-+                                 19 |     Protocol ID
+ *   20-+                                 20 |      (23:16)
+ *   21 |                                 21 |
+ *   22 |                                 22 |
+ *   23 |                                 23-+
+ *   24 |                                 24 |     Reserved
+ *   25 |     Start index of RFD-ring     25-+
+ *   26 |         (31:20)                 26 |     RSS Q-num (27:25)
+ *   27 |                                 27-+
+ *   28 |                                 28-+
+ *   29 |                                 29 |     RSS Hash algorithm
+ *   30 |                                 30 |      (31:28)
+ *   31-+                                 31-+
+ *
+ * Word 3 depiction:
+ *
+ *   0--+
+ *   1  |
+ *   2  |
+ *   3  |
+ *   4  |
+ *   5  |
+ *   6  |
+ *   7  |    Packet length (include FCS)
+ *   8  |         (13:0)
+ *   9  |
+ *   10 |
+ *   11 |
+ *   12 |
+ *   13-+
+ *   14      L4 Header checksum error
+ *   15      IPv4 checksum error
+ *   16      VLAN tagged
+ *   17-+
+ *   18 |    Protocol ID (19:17)
+ *   19-+
+ *   20      Receive error summary
+ *   21      FCS(CRC) error
+ *   22      Frame alignment error
+ *   23      Truncated packet
+ *   24      Runt packet
+ *   25      Incomplete packet due to insufficient rx-desc
+ *   26      Broadcast packet
+ *   27      Multicast packet
+ *   28      Ethernet type (EII or 802.3)
+ *   29      FIFO overflow
+ *   30      Length error (for 802.3, length field mismatch with actual len)
+ *   31      Updated, indicate to driver that this RRD is refreshed.
+ */
+struct alx_rrd {
+       __le32 word0;
+       __le32 rss_hash;
+       __le32 word2;
+       __le32 word3;
+} __packed;
+
+/* rrd word 0 */
+#define RRD_XSUM_MASK          0xFFFF
+#define RRD_XSUM_SHIFT         0
+#define RRD_NOR_MASK           0x000F
+#define RRD_NOR_SHIFT          16
+#define RRD_SI_MASK            0x0FFF
+#define RRD_SI_SHIFT           20
+
+/* rrd word 2 */
+#define RRD_VLTAG_MASK         0xFFFF
+#define RRD_VLTAG_SHIFT                0
+#define RRD_PID_MASK           0x00FF
+#define RRD_PID_SHIFT          16
+/* non-ip packet */
+#define RRD_PID_NONIP          0
+/* ipv4(only) */
+#define RRD_PID_IPV4           1
+/* tcp/ipv6 */
+#define RRD_PID_IPV6TCP                2
+/* tcp/ipv4 */
+#define RRD_PID_IPV4TCP                3
+/* udp/ipv6 */
+#define RRD_PID_IPV6UDP                4
+/* udp/ipv4 */
+#define RRD_PID_IPV4UDP                5
+/* ipv6(only) */
+#define RRD_PID_IPV6           6
+/* LLDP packet */
+#define RRD_PID_LLDP           7
+/* 1588 packet */
+#define RRD_PID_1588           8
+#define RRD_RSSQ_MASK          0x0007
+#define RRD_RSSQ_SHIFT         25
+#define RRD_RSSALG_MASK                0x000F
+#define RRD_RSSALG_SHIFT       28
+#define RRD_RSSALG_TCPV6       0x1
+#define RRD_RSSALG_IPV6                0x2
+#define RRD_RSSALG_TCPV4       0x4
+#define RRD_RSSALG_IPV4                0x8
+
+/* rrd word 3 */
+#define RRD_PKTLEN_MASK                0x3FFF
+#define RRD_PKTLEN_SHIFT       0
+#define RRD_ERR_L4_MASK                0x0001
+#define RRD_ERR_L4_SHIFT       14
+#define RRD_ERR_IPV4_MASK      0x0001
+#define RRD_ERR_IPV4_SHIFT     15
+#define RRD_VLTAGGED_MASK      0x0001
+#define RRD_VLTAGGED_SHIFT     16
+#define RRD_OLD_PID_MASK       0x0007
+#define RRD_OLD_PID_SHIFT      17
+#define RRD_ERR_RES_MASK       0x0001
+#define RRD_ERR_RES_SHIFT      20
+#define RRD_ERR_FCS_MASK       0x0001
+#define RRD_ERR_FCS_SHIFT      21
+#define RRD_ERR_FAE_MASK       0x0001
+#define RRD_ERR_FAE_SHIFT      22
+#define RRD_ERR_TRUNC_MASK     0x0001
+#define RRD_ERR_TRUNC_SHIFT    23
+#define RRD_ERR_RUNT_MASK      0x0001
+#define RRD_ERR_RUNT_SHIFT     24
+#define RRD_ERR_ICMP_MASK      0x0001
+#define RRD_ERR_ICMP_SHIFT     25
+#define RRD_BCAST_MASK         0x0001
+#define RRD_BCAST_SHIFT                26
+#define RRD_MCAST_MASK         0x0001
+#define RRD_MCAST_SHIFT                27
+#define RRD_ETHTYPE_MASK       0x0001
+#define RRD_ETHTYPE_SHIFT      28
+#define RRD_ERR_FIFOV_MASK     0x0001
+#define RRD_ERR_FIFOV_SHIFT    29
+#define RRD_ERR_LEN_MASK       0x0001
+#define RRD_ERR_LEN_SHIFT      30
+#define RRD_UPDATED_MASK       0x0001
+#define RRD_UPDATED_SHIFT      31
+
+
+#define ALX_MAX_SETUP_LNK_CYCLE        50
+
+/* for FlowControl */
+#define ALX_FC_RX              0x01
+#define ALX_FC_TX              0x02
+#define ALX_FC_ANEG            0x04
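+/* the flags may be combined, e.g. ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX as
+ * passed to alx_setup_speed_duplex() from alx_select_powersaving_speed()
+ */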
+
+/* for sleep control */
+#define ALX_SLEEP_WOL_PHY      0x00000001
+#define ALX_SLEEP_WOL_MAGIC    0x00000002
+#define ALX_SLEEP_CIFS         0x00000004
+#define ALX_SLEEP_ACTIVE       (ALX_SLEEP_WOL_PHY | \
+                                ALX_SLEEP_WOL_MAGIC | \
+                                ALX_SLEEP_CIFS)
+
+/* for RSS hash type */
+#define ALX_RSS_HASH_TYPE_IPV4         0x1
+#define ALX_RSS_HASH_TYPE_IPV4_TCP     0x2
+#define ALX_RSS_HASH_TYPE_IPV6         0x4
+#define ALX_RSS_HASH_TYPE_IPV6_TCP     0x8
+#define ALX_RSS_HASH_TYPE_ALL          (ALX_RSS_HASH_TYPE_IPV4 | \
+                                        ALX_RSS_HASH_TYPE_IPV4_TCP | \
+                                        ALX_RSS_HASH_TYPE_IPV6 | \
+                                        ALX_RSS_HASH_TYPE_IPV6_TCP)
+#define ALX_DEF_RXBUF_SIZE     1536
+#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
+#define ALX_MAX_TSO_PKT_SIZE   (7*1024)
+#define ALX_MAX_FRAME_SIZE     ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_FRAME_SIZE     68
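+/* raw on-wire frame size for a given MTU: Ethernet header + FCS + VLAN tag */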
+#define ALX_RAW_MTU(_mtu)      (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+#define ALX_MAX_RX_QUEUES      8
+#define ALX_MAX_TX_QUEUES      4
+#define ALX_MAX_HANDLED_INTRS  5
+
+#define ALX_ISR_MISC           (ALX_ISR_PCIE_LNKDOWN | \
+                                ALX_ISR_DMAW | \
+                                ALX_ISR_DMAR | \
+                                ALX_ISR_SMB | \
+                                ALX_ISR_MANU | \
+                                ALX_ISR_TIMER)
+
+#define ALX_ISR_FATAL          (ALX_ISR_PCIE_LNKDOWN | \
+                                ALX_ISR_DMAW | ALX_ISR_DMAR)
+
+#define ALX_ISR_ALERT          (ALX_ISR_RXF_OV | \
+                                ALX_ISR_TXF_UR | \
+                                ALX_ISR_RFD_UR)
+
+#define ALX_ISR_ALL_QUEUES     (ALX_ISR_TX_Q0 | \
+                                ALX_ISR_TX_Q1 | \
+                                ALX_ISR_TX_Q2 | \
+                                ALX_ISR_TX_Q3 | \
+                                ALX_ISR_RX_Q0 | \
+                                ALX_ISR_RX_Q1 | \
+                                ALX_ISR_RX_Q2 | \
+                                ALX_ISR_RX_Q3 | \
+                                ALX_ISR_RX_Q4 | \
+                                ALX_ISR_RX_Q5 | \
+                                ALX_ISR_RX_Q6 | \
+                                ALX_ISR_RX_Q7)
+
+/* maximum number of MSI-X interrupt vectors */
+#define ALX_MAX_MSIX_INTRS     16
+
+#define ALX_GET_FIELD(_data, _field)                                   \
+       (((_data) >> _field ## _SHIFT) & _field ## _MASK)
+
+#define ALX_SET_FIELD(_data, _field, _value)   do {                    \
+               (_data) &= ~(_field ## _MASK << _field ## _SHIFT);      \
+               (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
+       } while (0)
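+
+/* Illustrative example (macro expansion shown for reference only):
+ * ALX_GET_FIELD pastes the _SHIFT and _MASK suffixes onto the field name,
+ * so
+ *
+ *     ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN)
+ *
+ * expands to
+ *
+ *     (le32_to_cpu(rrd->word3) >> RRD_PKTLEN_SHIFT) & RRD_PKTLEN_MASK
+ *
+ * i.e. the low 14 bits of word3, which is how the RX path recovers the
+ * received frame length.
+ */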
+
+struct alx_hw {
+       struct pci_dev *pdev;
+       u8 __iomem *hw_addr;
+
+       /* current & permanent mac addr */
+       u8 mac_addr[ETH_ALEN];
+       u8 perm_addr[ETH_ALEN];
+
+       u16 mtu;
+       u16 imt;
+       u8 dma_chnl;
+       u8 max_dma_chnl;
+       /* TPD threshold to trigger an interrupt */
+       u32 ith_tpd;
+       u32 rx_ctrl;
+       u32 mc_hash[2];
+
+       u32 smb_timer;
+       /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
+       int link_speed;
+
+       /* auto-neg advertisement or force mode config */
+       u32 adv_cfg;
+       u8 flowctrl;
+
+       u32 sleep_ctrl;
+
+       spinlock_t mdio_lock;
+       struct mdio_if_info mdio;
+       u16 phy_id[2];
+
+       /* PHY link patch flag */
+       bool lnk_patch;
+};
+
+static inline int alx_hw_revision(struct alx_hw *hw)
+{
+       return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
+}
+
+static inline bool alx_hw_with_cr(struct alx_hw *hw)
+{
+       return hw->pdev->revision & 1;
+}
+
+static inline bool alx_hw_giga(struct alx_hw *hw)
+{
+       return hw->pdev->device & 1;
+}
+
+static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
+{
+       writeb(val, hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
+{
+       writew(val, hw->hw_addr + reg);
+}
+
+static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
+{
+       return readw(hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
+{
+       writel(val, hw->hw_addr + reg);
+}
+
+static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
+{
+       return readl(hw->hw_addr + reg);
+}
+
+static inline void alx_post_write(struct alx_hw *hw)
+{
+       readl(hw->hw_addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+void alx_reset_phy(struct alx_hw *hw);
+void alx_reset_pcie(struct alx_hw *hw);
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
+void alx_post_phy_link(struct alx_hw *hw);
+int alx_pre_suspend(struct alx_hw *hw, int speed);
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
+int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_clear_phy_intr(struct alx_hw *hw);
+int alx_config_wol(struct alx_hw *hw);
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
+void alx_start_mac(struct alx_hw *hw);
+int alx_reset_mac(struct alx_hw *hw);
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
+bool alx_phy_configured(struct alx_hw *hw);
+void alx_configure_basic(struct alx_hw *hw);
+void alx_disable_rss(struct alx_hw *hw);
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
+bool alx_get_phy_info(struct alx_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644 (file)
index 0000000..418de8b
--- /dev/null
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip6_checksum.h>
+#include <linux/crc32.h>
+#include "alx.h"
+#include "hw.h"
+#include "reg.h"
+
+const char alx_drv_name[] = "alx";
+
+
+static void alx_free_txbuf(struct alx_priv *alx, int entry)
+{
+       struct alx_buffer *txb = &alx->txq.bufs[entry];
+
+       if (dma_unmap_len(txb, size)) {
+               dma_unmap_single(&alx->hw.pdev->dev,
+                                dma_unmap_addr(txb, dma),
+                                dma_unmap_len(txb, size),
+                                DMA_TO_DEVICE);
+               dma_unmap_len_set(txb, size, 0);
+       }
+
+       if (txb->skb) {
+               dev_kfree_skb_any(txb->skb);
+               txb->skb = NULL;
+       }
+}
+
+static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+{
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct sk_buff *skb;
+       struct alx_buffer *cur_buf;
+       dma_addr_t dma;
+       u16 cur, next, count = 0;
+
+       next = cur = rxq->write_idx;
+       if (++next == alx->rx_ringsz)
+               next = 0;
+       cur_buf = &rxq->bufs[cur];
+
+       while (!cur_buf->skb && next != rxq->read_idx) {
+               struct alx_rfd *rfd = &rxq->rfd[cur];
+
+               skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+               if (!skb)
+                       break;
+               dma = dma_map_single(&alx->hw.pdev->dev,
+                                    skb->data, alx->rxbuf_size,
+                                    DMA_FROM_DEVICE);
+               if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
+                       dev_kfree_skb(skb);
+                       break;
+               }
+
+               /* Unfortunately, RX descriptor buffers must be 4-byte
+                * aligned, so we can't use IP alignment.
+                */
+               if (WARN_ON(dma & 3)) {
+                       dev_kfree_skb(skb);
+                       break;
+               }
+
+               cur_buf->skb = skb;
+               dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
+               dma_unmap_addr_set(cur_buf, dma, dma);
+               rfd->addr = cpu_to_le64(dma);
+
+               cur = next;
+               if (++next == alx->rx_ringsz)
+                       next = 0;
+               cur_buf = &rxq->bufs[cur];
+               count++;
+       }
+
+       if (count) {
+               /* flush all updates before updating hardware */
+               wmb();
+               rxq->write_idx = cur;
+               alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
+       }
+
+       return count;
+}
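+
+/* A brief sketch of the refill contract above (descriptive only): the loop
+ * stops one slot short of read_idx so the producer of free RX buffers never
+ * catches up with the consumer, and the final write of the new producer
+ * index to ALX_RFD_PIDX is what publishes the freshly mapped buffers to the
+ * hardware, once the wmb() has made the descriptor updates visible.
+ */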
+
+static inline int alx_tpd_avail(struct alx_priv *alx)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+
+       if (txq->write_idx >= txq->read_idx)
+               return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+       return txq->read_idx - txq->write_idx - 1;
+}
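+
+/* Worked example for the ring arithmetic above (illustrative numbers only):
+ * with a 256-entry TX ring, write_idx == 10 and read_idx == 200 leave
+ * 200 - 10 - 1 = 189 free TPDs; one slot is always kept unused so that
+ * write_idx == read_idx unambiguously means "ring empty" rather than
+ * "ring full".
+ */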
+
+static bool alx_clean_tx_irq(struct alx_priv *alx)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+       u16 hw_read_idx, sw_read_idx;
+       unsigned int total_bytes = 0, total_packets = 0;
+       int budget = ALX_DEFAULT_TX_WORK;
+
+       sw_read_idx = txq->read_idx;
+       hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+
+       if (sw_read_idx != hw_read_idx) {
+               while (sw_read_idx != hw_read_idx && budget > 0) {
+                       struct sk_buff *skb;
+
+                       skb = txq->bufs[sw_read_idx].skb;
+                       if (skb) {
+                               total_bytes += skb->len;
+                               total_packets++;
+                               budget--;
+                       }
+
+                       alx_free_txbuf(alx, sw_read_idx);
+
+                       if (++sw_read_idx == alx->tx_ringsz)
+                               sw_read_idx = 0;
+               }
+               txq->read_idx = sw_read_idx;
+
+               netdev_completed_queue(alx->dev, total_packets, total_bytes);
+       }
+
+       if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
+           alx_tpd_avail(alx) > alx->tx_ringsz/4)
+               netif_wake_queue(alx->dev);
+
+       return sw_read_idx == hw_read_idx;
+}
+
+static void alx_schedule_link_check(struct alx_priv *alx)
+{
+       schedule_work(&alx->link_check_wk);
+}
+
+static void alx_schedule_reset(struct alx_priv *alx)
+{
+       schedule_work(&alx->reset_wk);
+}
+
+static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+{
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct alx_rrd *rrd;
+       struct alx_buffer *rxb;
+       struct sk_buff *skb;
+       u16 length, rfd_cleaned = 0;
+
+       while (budget > 0) {
+               rrd = &rxq->rrd[rxq->rrd_read_idx];
+               if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+                       break;
+               rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
+
+               if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+                                 RRD_SI) != rxq->read_idx ||
+                   ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+                                 RRD_NOR) != 1) {
+                       alx_schedule_reset(alx);
+                       return 0;
+               }
+
+               rxb = &rxq->bufs[rxq->read_idx];
+               dma_unmap_single(&alx->hw.pdev->dev,
+                                dma_unmap_addr(rxb, dma),
+                                dma_unmap_len(rxb, size),
+                                DMA_FROM_DEVICE);
+               dma_unmap_len_set(rxb, size, 0);
+               skb = rxb->skb;
+               rxb->skb = NULL;
+
+               if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
+                   rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
+                       rrd->word3 = 0;
+                       dev_kfree_skb_any(skb);
+                       goto next_pkt;
+               }
+
+               length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
+                                      RRD_PKTLEN) - ETH_FCS_LEN;
+               skb_put(skb, length);
+               skb->protocol = eth_type_trans(skb, alx->dev);
+
+               skb_checksum_none_assert(skb);
+               if (alx->dev->features & NETIF_F_RXCSUM &&
+                   !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
+                                   cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
+                       switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
+                                             RRD_PID)) {
+                       case RRD_PID_IPV6UDP:
+                       case RRD_PID_IPV4UDP:
+                       case RRD_PID_IPV4TCP:
+                       case RRD_PID_IPV6TCP:
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               break;
+                       }
+               }
+
+               napi_gro_receive(&alx->napi, skb);
+               budget--;
+
+next_pkt:
+               if (++rxq->read_idx == alx->rx_ringsz)
+                       rxq->read_idx = 0;
+               if (++rxq->rrd_read_idx == alx->rx_ringsz)
+                       rxq->rrd_read_idx = 0;
+
+               if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+                       rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
+       }
+
+       if (rfd_cleaned)
+               alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+       return budget > 0;
+}
+
+static int alx_poll(struct napi_struct *napi, int budget)
+{
+       struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+       struct alx_hw *hw = &alx->hw;
+       bool complete = true;
+       unsigned long flags;
+
+       complete = alx_clean_tx_irq(alx) &&
+                  alx_clean_rx_irq(alx, budget);
+
+       if (!complete)
+               return 1;
+
+       napi_complete(&alx->napi);
+
+       /* enable interrupt */
+       spin_lock_irqsave(&alx->irq_lock, flags);
+       alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+       alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+       spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+       alx_post_write(hw);
+
+       return 0;
+}
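+
+/* A rough sketch of the interrupt/NAPI hand-off (descriptive only): the
+ * hard IRQ path below masks the TX/RX queue bits in ALX_IMR and schedules
+ * NAPI; alx_poll() then drains TX completions and RX descriptors and, only
+ * once both report that no work is left, completes NAPI and re-arms
+ * ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0 under irq_lock.
+ */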
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+       struct alx_hw *hw = &alx->hw;
+       bool write_int_mask = false;
+
+       spin_lock(&alx->irq_lock);
+
+       /* ACK interrupt */
+       alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+       intr &= alx->int_mask;
+
+       if (intr & ALX_ISR_FATAL) {
+               netif_warn(alx, hw, alx->dev,
+                          "fatal interrupt 0x%x, resetting\n", intr);
+               alx_schedule_reset(alx);
+               goto out;
+       }
+
+       if (intr & ALX_ISR_ALERT)
+               netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
+
+       if (intr & ALX_ISR_PHY) {
+               /* Mask the PHY interrupt here: its source is inside the
+                * PHY, so the interrupt status bit can only be cleared once
+                * the PHY's internal status has been cleared (done from the
+                * link-check worker).
+                */
+               alx->int_mask &= ~ALX_ISR_PHY;
+               write_int_mask = true;
+               alx_schedule_link_check(alx);
+       }
+
+       if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
+               napi_schedule(&alx->napi);
+               /* mask rx/tx interrupts; re-enabled when NAPI completes */
+               alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+               write_int_mask = true;
+       }
+
+       if (write_int_mask)
+               alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+
+       alx_write_mem32(hw, ALX_ISR, 0);
+
+ out:
+       spin_unlock(&alx->irq_lock);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msi(int irq, void *data)
+{
+       struct alx_priv *alx = data;
+
+       return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
+}
+
+static irqreturn_t alx_intr_legacy(int irq, void *data)
+{
+       struct alx_priv *alx = data;
+       struct alx_hw *hw = &alx->hw;
+       u32 intr;
+
+       intr = alx_read_mem32(hw, ALX_ISR);
+
+       if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
+               return IRQ_NONE;
+
+       return alx_intr_handle(alx, intr);
+}
+
+static void alx_init_ring_ptrs(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+       u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+
+       alx->rxq.read_idx = 0;
+       alx->rxq.write_idx = 0;
+       alx->rxq.rrd_read_idx = 0;
+       alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
+       alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
+       alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
+       alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
+       alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
+       alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
+
+       alx->txq.read_idx = 0;
+       alx->txq.write_idx = 0;
+       alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+       alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
+       alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
+
+       /* load these pointers into the chip */
+       alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
+}
+
+static void alx_free_txring_buf(struct alx_priv *alx)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+       int i;
+
+       if (!txq->bufs)
+               return;
+
+       for (i = 0; i < alx->tx_ringsz; i++)
+               alx_free_txbuf(alx, i);
+
+       memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
+       memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+       txq->write_idx = 0;
+       txq->read_idx = 0;
+
+       netdev_reset_queue(alx->dev);
+}
+
+static void alx_free_rxring_buf(struct alx_priv *alx)
+{
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct alx_buffer *cur_buf;
+       u16 i;
+
+       if (rxq == NULL)
+               return;
+
+       for (i = 0; i < alx->rx_ringsz; i++) {
+               cur_buf = rxq->bufs + i;
+               if (cur_buf->skb) {
+                       dma_unmap_single(&alx->hw.pdev->dev,
+                                        dma_unmap_addr(cur_buf, dma),
+                                        dma_unmap_len(cur_buf, size),
+                                        DMA_FROM_DEVICE);
+                       dev_kfree_skb(cur_buf->skb);
+                       cur_buf->skb = NULL;
+                       dma_unmap_len_set(cur_buf, size, 0);
+                       dma_unmap_addr_set(cur_buf, dma, 0);
+               }
+       }
+
+       rxq->write_idx = 0;
+       rxq->read_idx = 0;
+       rxq->rrd_read_idx = 0;
+}
+
+static void alx_free_buffers(struct alx_priv *alx)
+{
+       alx_free_txring_buf(alx);
+       alx_free_rxring_buf(alx);
+}
+
+static int alx_reinit_rings(struct alx_priv *alx)
+{
+       alx_free_buffers(alx);
+
+       alx_init_ring_ptrs(alx);
+
+       if (!alx_refill_rx_ring(alx, GFP_KERNEL))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
+{
+       u32 crc32, bit, reg;
+
+       crc32 = ether_crc(ETH_ALEN, addr);
+       reg = (crc32 >> 31) & 0x1;
+       bit = (crc32 >> 26) & 0x1F;
+
+       mc_hash[reg] |= BIT(bit);
+}
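+
+/* Worked example for the hash above (illustrative numbers only):
+ * ether_crc() returns the CRC-32 of the multicast address; bit 31 of that
+ * CRC picks one of the two 32-bit hash registers and bits 30..26 pick the
+ * bit inside it.  A CRC of 0x84000000, for instance, yields reg = 1 and
+ * bit = 1, so bit 1 of mc_hash[1] is set and later written to
+ * ALX_HASH_TBL1.
+ */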
+
+static void __alx_set_rx_mode(struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       struct netdev_hw_addr *ha;
+       u32 mc_hash[2] = {};
+
+       if (!(netdev->flags & IFF_ALLMULTI)) {
+               netdev_for_each_mc_addr(ha, netdev)
+                       alx_add_mc_addr(hw, ha->addr, mc_hash);
+
+               alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
+               alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
+       }
+
+       hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
+       if (netdev->flags & IFF_PROMISC)
+               hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
+       if (netdev->flags & IFF_ALLMULTI)
+               hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
+
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_set_rx_mode(struct net_device *netdev)
+{
+       __alx_set_rx_mode(netdev);
+}
+
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       struct sockaddr *addr = data;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+               netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+       alx_set_macaddr(hw, hw->mac_addr);
+
+       return 0;
+}
+
+static int alx_alloc_descriptors(struct alx_priv *alx)
+{
+       alx->txq.bufs = kcalloc(alx->tx_ringsz,
+                               sizeof(struct alx_buffer),
+                               GFP_KERNEL);
+       if (!alx->txq.bufs)
+               return -ENOMEM;
+
+       alx->rxq.bufs = kcalloc(alx->rx_ringsz,
+                               sizeof(struct alx_buffer),
+                               GFP_KERNEL);
+       if (!alx->rxq.bufs)
+               goto out_free;
+
+       /* physical tx/rx ring descriptors
+        *
+        * Allocate them as a single chunk because they must not cross a
+        * 4G boundary (hardware has a single register for high 32 bits
+        * of addresses only)
+        */
+       alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+                           sizeof(struct alx_rrd) * alx->rx_ringsz +
+                           sizeof(struct alx_rfd) * alx->rx_ringsz;
+       alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
+                                               alx->descmem.size,
+                                               &alx->descmem.dma,
+                                               GFP_KERNEL);
+       if (!alx->descmem.virt)
+               goto out_free;
+
+       alx->txq.tpd = (void *)alx->descmem.virt;
+       alx->txq.tpd_dma = alx->descmem.dma;
+
+       /* alignment requirement for next block */
+       BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+
+       alx->rxq.rrd =
+               (void *)((u8 *)alx->descmem.virt +
+                        sizeof(struct alx_txd) * alx->tx_ringsz);
+       alx->rxq.rrd_dma = alx->descmem.dma +
+                          sizeof(struct alx_txd) * alx->tx_ringsz;
+
+       /* alignment requirement for next block */
+       BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+
+       alx->rxq.rfd =
+               (void *)((u8 *)alx->descmem.virt +
+                        sizeof(struct alx_txd) * alx->tx_ringsz +
+                        sizeof(struct alx_rrd) * alx->rx_ringsz);
+       alx->rxq.rfd_dma = alx->descmem.dma +
+                          sizeof(struct alx_txd) * alx->tx_ringsz +
+                          sizeof(struct alx_rrd) * alx->rx_ringsz;
+
+       return 0;
+out_free:
+       kfree(alx->txq.bufs);
+       kfree(alx->rxq.bufs);
+       return -ENOMEM;
+}
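+
+/* Layout sketch for the single coherent chunk allocated above (reference
+ * only; names match struct alx_priv):
+ *
+ *   descmem.virt / descmem.dma
+ *   +---------------------+---------------------+---------------------+
+ *   | tx_ringsz * alx_txd | rx_ringsz * alx_rrd | rx_ringsz * alx_rfd |
+ *   +---------------------+---------------------+---------------------+
+ *     txq.tpd / tpd_dma     rxq.rrd / rrd_dma     rxq.rfd / rfd_dma
+ *
+ * Keeping all three rings in one allocation guarantees they share the same
+ * upper 32 address bits, which the hardware stores only once.
+ */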
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+       int err;
+
+       err = alx_alloc_descriptors(alx);
+       if (err)
+               return err;
+
+       alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+       alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+
+       netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+
+       alx_reinit_rings(alx);
+       return 0;
+}
+
+static void alx_free_rings(struct alx_priv *alx)
+{
+       netif_napi_del(&alx->napi);
+       alx_free_buffers(alx);
+
+       kfree(alx->txq.bufs);
+       kfree(alx->rxq.bufs);
+
+       dma_free_coherent(&alx->hw.pdev->dev,
+                         alx->descmem.size,
+                         alx->descmem.virt,
+                         alx->descmem.dma);
+}
+
+static void alx_config_vector_mapping(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+       alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+       alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
+}
+
+static void alx_irq_enable(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       /* level-1 interrupt switch */
+       alx_write_mem32(hw, ALX_ISR, 0);
+       alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+       alx_post_write(hw);
+}
+
+static void alx_irq_disable(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+       alx_write_mem32(hw, ALX_IMR, 0);
+       alx_post_write(hw);
+
+       synchronize_irq(alx->hw.pdev->irq);
+}
+
+static int alx_request_irq(struct alx_priv *alx)
+{
+       struct pci_dev *pdev = alx->hw.pdev;
+       struct alx_hw *hw = &alx->hw;
+       int err;
+       u32 msi_ctrl;
+
+       msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
+
+       if (!pci_enable_msi(alx->hw.pdev)) {
+               alx->msi = true;
+
+               alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
+                               msi_ctrl | ALX_MSI_MASK_SEL_LINE);
+               err = request_irq(pdev->irq, alx_intr_msi, 0,
+                                 alx->dev->name, alx);
+               if (!err)
+                       goto out;
+               /* fall back to legacy interrupt */
+               pci_disable_msi(alx->hw.pdev);
+       }
+
+       alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
+       err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+                         alx->dev->name, alx);
+out:
+       if (!err)
+               alx_config_vector_mapping(alx);
+       return err;
+}
+
+static void alx_free_irq(struct alx_priv *alx)
+{
+       struct pci_dev *pdev = alx->hw.pdev;
+
+       free_irq(pdev->irq, alx);
+
+       if (alx->msi) {
+               pci_disable_msi(alx->hw.pdev);
+               alx->msi = false;
+       }
+}
+
+static int alx_identify_hw(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+       int rev = alx_hw_revision(hw);
+
+       if (rev > ALX_REV_C0)
+               return -EINVAL;
+
+       hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
+
+       return 0;
+}
+
+static int alx_init_sw(struct alx_priv *alx)
+{
+       struct pci_dev *pdev = alx->hw.pdev;
+       struct alx_hw *hw = &alx->hw;
+       int err;
+
+       err = alx_identify_hw(alx);
+       if (err) {
+               dev_err(&pdev->dev, "unrecognized chip, aborting\n");
+               return err;
+       }
+
+       alx->hw.lnk_patch =
+               pdev->device == ALX_DEV_ID_AR8161 &&
+               pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
+               pdev->subsystem_device == 0x0091 &&
+               pdev->revision == 0;
+
+       hw->smb_timer = 400;
+       hw->mtu = alx->dev->mtu;
+       alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
+       alx->tx_ringsz = 256;
+       alx->rx_ringsz = 512;
+       hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
+       hw->imt = 200;
+       alx->int_mask = ALX_ISR_MISC;
+       hw->dma_chnl = hw->max_dma_chnl;
+       hw->ith_tpd = alx->tx_ringsz / 3;
+       hw->link_speed = SPEED_UNKNOWN;
+       hw->adv_cfg = ADVERTISED_Autoneg |
+                     ADVERTISED_10baseT_Half |
+                     ADVERTISED_10baseT_Full |
+                     ADVERTISED_100baseT_Full |
+                     ADVERTISED_100baseT_Half |
+                     ADVERTISED_1000baseT_Full;
+       hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
+
+       hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
+                     ALX_MAC_CTRL_MHASH_ALG_HI5B |
+                     ALX_MAC_CTRL_BRD_EN |
+                     ALX_MAC_CTRL_PCRCE |
+                     ALX_MAC_CTRL_CRCE |
+                     ALX_MAC_CTRL_RXFC_EN |
+                     ALX_MAC_CTRL_TXFC_EN |
+                     7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+
+       return err;
+}
+
+
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+                                         netdev_features_t features)
+{
+       if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
+               features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+       return features;
+}
+
+static void alx_netif_stop(struct alx_priv *alx)
+{
+       alx->dev->trans_start = jiffies;
+       if (netif_carrier_ok(alx->dev)) {
+               netif_carrier_off(alx->dev);
+               netif_tx_disable(alx->dev);
+               napi_disable(&alx->napi);
+       }
+}
+
+static void alx_halt(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_netif_stop(alx);
+       hw->link_speed = SPEED_UNKNOWN;
+
+       alx_reset_mac(hw);
+
+       /* disable l0s/l1 */
+       alx_enable_aspm(hw, false, false);
+       alx_irq_disable(alx);
+       alx_free_buffers(alx);
+}
+
+static void alx_configure(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_configure_basic(hw);
+       alx_disable_rss(hw);
+       __alx_set_rx_mode(alx->dev);
+
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_activate(struct alx_priv *alx)
+{
+       /* hardware settings were lost, restore them */
+       alx_reinit_rings(alx);
+       alx_configure(alx);
+
+       /* clear old interrupts */
+       alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+       alx_irq_enable(alx);
+
+       alx_schedule_link_check(alx);
+}
+
+static void alx_reinit(struct alx_priv *alx)
+{
+       ASSERT_RTNL();
+
+       alx_halt(alx);
+       alx_activate(alx);
+}
+
+static int alx_change_mtu(struct net_device *netdev, int mtu)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+       if ((max_frame < ALX_MIN_FRAME_SIZE) ||
+           (max_frame > ALX_MAX_FRAME_SIZE))
+               return -EINVAL;
+
+       if (netdev->mtu == mtu)
+               return 0;
+
+       netdev->mtu = mtu;
+       alx->hw.mtu = mtu;
+       alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
+                          ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
+       netdev_update_features(netdev);
+       if (netif_running(netdev))
+               alx_reinit(alx);
+       return 0;
+}
+
+static void alx_netif_start(struct alx_priv *alx)
+{
+       netif_tx_wake_all_queues(alx->dev);
+       napi_enable(&alx->napi);
+       netif_carrier_on(alx->dev);
+}
+
+static int __alx_open(struct alx_priv *alx, bool resume)
+{
+       int err;
+
+       if (!resume)
+               netif_carrier_off(alx->dev);
+
+       err = alx_alloc_rings(alx);
+       if (err)
+               return err;
+
+       alx_configure(alx);
+
+       err = alx_request_irq(alx);
+       if (err)
+               goto out_free_rings;
+
+       /* clear old interrupts */
+       alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+       alx_irq_enable(alx);
+
+       if (!resume)
+               netif_tx_start_all_queues(alx->dev);
+
+       alx_schedule_link_check(alx);
+       return 0;
+
+out_free_rings:
+       alx_free_rings(alx);
+       return err;
+}
+
+static void __alx_stop(struct alx_priv *alx)
+{
+       alx_halt(alx);
+       alx_free_irq(alx);
+       alx_free_rings(alx);
+}
+
+static const char *alx_speed_desc(u16 speed)
+{
+       switch (speed) {
+       case SPEED_1000 + DUPLEX_FULL:
+               return "1 Gbps Full";
+       case SPEED_100 + DUPLEX_FULL:
+               return "100 Mbps Full";
+       case SPEED_100 + DUPLEX_HALF:
+               return "100 Mbps Half";
+       case SPEED_10 + DUPLEX_FULL:
+               return "10 Mbps Full";
+       case SPEED_10 + DUPLEX_HALF:
+               return "10 Mbps Half";
+       default:
+               return "Unknown speed";
+       }
+}
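+
+/* Note on the encoding above (descriptive only): hw->link_speed packs speed
+ * and duplex into one int as SPEED_* + DUPLEX_*, so 1 Gbps full duplex is
+ * stored as SPEED_1000 + DUPLEX_FULL = 1000 + 1 = 1001 and matched against
+ * the case labels above; SPEED_UNKNOWN marks a downed link.
+ */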
+
+static void alx_check_link(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+       unsigned long flags;
+       int speed, old_speed;
+       int err;
+
+       /* clear PHY internal interrupt status, otherwise the main
+        * interrupt status will be asserted forever
+        */
+       alx_clear_phy_intr(hw);
+
+       err = alx_get_phy_link(hw, &speed);
+       if (err < 0)
+               goto reset;
+
+       spin_lock_irqsave(&alx->irq_lock, flags);
+       alx->int_mask |= ALX_ISR_PHY;
+       alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+       spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+       old_speed = hw->link_speed;
+
+       if (old_speed == speed)
+               return;
+       hw->link_speed = speed;
+
+       if (speed != SPEED_UNKNOWN) {
+               netif_info(alx, link, alx->dev,
+                          "NIC Up: %s\n", alx_speed_desc(speed));
+               alx_post_phy_link(hw);
+               alx_enable_aspm(hw, true, true);
+               alx_start_mac(hw);
+
+               if (old_speed == SPEED_UNKNOWN)
+                       alx_netif_start(alx);
+       } else {
+               /* link is now down */
+               alx_netif_stop(alx);
+               netif_info(alx, link, alx->dev, "Link Down\n");
+               err = alx_reset_mac(hw);
+               if (err)
+                       goto reset;
+               alx_irq_disable(alx);
+
+               /* MAC reset causes all HW settings to be lost, restore all */
+               err = alx_reinit_rings(alx);
+               if (err)
+                       goto reset;
+               alx_configure(alx);
+               alx_enable_aspm(hw, false, true);
+               alx_post_phy_link(hw);
+               alx_irq_enable(alx);
+       }
+
+       return;
+
+reset:
+       alx_schedule_reset(alx);
+}
+
+static int alx_open(struct net_device *netdev)
+{
+       return __alx_open(netdev_priv(netdev), false);
+}
+
+static int alx_stop(struct net_device *netdev)
+{
+       __alx_stop(netdev_priv(netdev));
+       return 0;
+}
+
+static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+       struct alx_hw *hw = &alx->hw;
+       int err, speed;
+
+       netif_device_detach(netdev);
+
+       if (netif_running(netdev))
+               __alx_stop(alx);
+
+#ifdef CONFIG_PM_SLEEP
+       err = pci_save_state(pdev);
+       if (err)
+               return err;
+#endif
+
+       err = alx_select_powersaving_speed(hw, &speed);
+       if (err)
+               return err;
+       err = alx_clear_phy_intr(hw);
+       if (err)
+               return err;
+       err = alx_pre_suspend(hw, speed);
+       if (err)
+               return err;
+       err = alx_config_wol(hw);
+       if (err)
+               return err;
+
+       *wol_en = false;
+       if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
+               netif_info(alx, wol, netdev,
+                          "wol: ctrl=%X, speed=%X\n",
+                          hw->sleep_ctrl, speed);
+               device_set_wakeup_enable(&pdev->dev, true);
+               *wol_en = true;
+       }
+
+       pci_disable_device(pdev);
+
+       return 0;
+}
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+       int err;
+       bool wol_en;
+
+       err = __alx_shutdown(pdev, &wol_en);
+       if (!err) {
+               pci_wake_from_d3(pdev, wol_en);
+               pci_set_power_state(pdev, PCI_D3hot);
+       } else {
+               dev_err(&pdev->dev, "shutdown fail %d\n", err);
+       }
+}
+
+static void alx_link_check(struct work_struct *work)
+{
+       struct alx_priv *alx;
+
+       alx = container_of(work, struct alx_priv, link_check_wk);
+
+       rtnl_lock();
+       alx_check_link(alx);
+       rtnl_unlock();
+}
+
+static void alx_reset(struct work_struct *work)
+{
+       struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
+
+       rtnl_lock();
+       alx_reinit(alx);
+       rtnl_unlock();
+}
+
+static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
+{
+       u8 cso, css;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       cso = skb_checksum_start_offset(skb);
+       if (cso & 1)
+               return -EINVAL;
+
+       css = cso + skb->csum_offset;
+       first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
+       first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
+       first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
+
+       return 0;
+}
+
+static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+       struct alx_txd *tpd, *first_tpd;
+       dma_addr_t dma;
+       int maplen, f, first_idx = txq->write_idx;
+
+       first_tpd = &txq->tpd[txq->write_idx];
+       tpd = first_tpd;
+
+       maplen = skb_headlen(skb);
+       dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+                            DMA_TO_DEVICE);
+       if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+               goto err_dma;
+
+       dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+       dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+       tpd->adrl.addr = cpu_to_le64(dma);
+       tpd->len = cpu_to_le16(maplen);
+
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+               struct skb_frag_struct *frag;
+
+               frag = &skb_shinfo(skb)->frags[f];
+
+               if (++txq->write_idx == alx->tx_ringsz)
+                       txq->write_idx = 0;
+               tpd = &txq->tpd[txq->write_idx];
+
+               tpd->word1 = first_tpd->word1;
+
+               maplen = skb_frag_size(frag);
+               dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+                                      maplen, DMA_TO_DEVICE);
+               if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+                       goto err_dma;
+               dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+               dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+               tpd->adrl.addr = cpu_to_le64(dma);
+               tpd->len = cpu_to_le16(maplen);
+       }
+
+       /* last TPD, set EOP flag and store skb */
+       tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
+       txq->bufs[txq->write_idx].skb = skb;
+
+       if (++txq->write_idx == alx->tx_ringsz)
+               txq->write_idx = 0;
+
+       return 0;
+
+err_dma:
+       f = first_idx;
+       while (f != txq->write_idx) {
+               alx_free_txbuf(alx, f);
+               if (++f == alx->tx_ringsz)
+                       f = 0;
+       }
+       return -ENOMEM;
+}
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+                                 struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_tx_queue *txq = &alx->txq;
+       struct alx_txd *first;
+       int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+
+       if (alx_tpd_avail(alx) < tpdreq) {
+               netif_stop_queue(alx->dev);
+               goto drop;
+       }
+
+       first = &txq->tpd[txq->write_idx];
+       memset(first, 0, sizeof(*first));
+
+       if (alx_tx_csum(skb, first))
+               goto drop;
+
+       if (alx_map_tx_skb(alx, skb) < 0)
+               goto drop;
+
+       netdev_sent_queue(alx->dev, skb->len);
+
+       /* flush updates before updating hardware */
+       wmb();
+       alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+
+       if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
+               netif_stop_queue(alx->dev);
+
+       return NETDEV_TX_OK;
+
+drop:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static void alx_tx_timeout(struct net_device *dev)
+{
+       struct alx_priv *alx = netdev_priv(dev);
+
+       alx_schedule_reset(alx);
+}
+
+static int alx_mdio_read(struct net_device *netdev,
+                        int prtad, int devad, u16 addr)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       u16 val;
+       int err;
+
+       if (prtad != hw->mdio.prtad)
+               return -EINVAL;
+
+       if (devad == MDIO_DEVAD_NONE)
+               err = alx_read_phy_reg(hw, addr, &val);
+       else
+               err = alx_read_phy_ext(hw, devad, addr, &val);
+
+       if (err)
+               return err;
+       return val;
+}
+
+static int alx_mdio_write(struct net_device *netdev,
+                         int prtad, int devad, u16 addr, u16 val)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       if (prtad != hw->mdio.prtad)
+               return -EINVAL;
+
+       if (devad == MDIO_DEVAD_NONE)
+               return alx_write_phy_reg(hw, addr, val);
+
+       return alx_write_phy_ext(hw, devad, addr, val);
+}
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       if (!netif_running(netdev))
+               return -EAGAIN;
+
+       return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       if (alx->msi)
+               alx_intr_msi(0, alx);
+       else
+               alx_intr_legacy(0, alx);
+}
+#endif
+
+static const struct net_device_ops alx_netdev_ops = {
+       .ndo_open               = alx_open,
+       .ndo_stop               = alx_stop,
+       .ndo_start_xmit         = alx_start_xmit,
+       .ndo_set_rx_mode        = alx_set_rx_mode,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = alx_set_mac_address,
+       .ndo_change_mtu         = alx_change_mtu,
+       .ndo_do_ioctl           = alx_ioctl,
+       .ndo_tx_timeout         = alx_tx_timeout,
+       .ndo_fix_features       = alx_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = alx_poll_controller,
+#endif
+};
+
+static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct net_device *netdev;
+       struct alx_priv *alx;
+       struct alx_hw *hw;
+       bool phy_configured;
+       int bars, pm_cap, err;
+
+       err = pci_enable_device_mem(pdev);
+       if (err)
+               return err;
+
+       /* The alx chip can DMA to 64-bit addresses, but it uses a single
+        * shared register for the high 32 bits, so only a single, aligned,
+        * 4 GB physical address range can be used for descriptors.
+        */
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+               dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
+       } else {
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               if (err) {
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   DMA_BIT_MASK(32));
+                       if (err) {
+                               dev_err(&pdev->dev,
+                                       "No usable DMA config, aborting\n");
+                               goto out_pci_disable;
+                       }
+               }
+       }
+
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "pci_request_selected_regions failed(bars:%d)\n", bars);
+               goto out_pci_disable;
+       }
+
+       pci_enable_pcie_error_reporting(pdev);
+       pci_set_master(pdev);
+
+       pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+       if (pm_cap == 0) {
+               dev_err(&pdev->dev,
+                       "Can't find power management capability, aborting\n");
+               err = -EIO;
+               goto out_pci_release;
+       }
+
+       err = pci_set_power_state(pdev, PCI_D0);
+       if (err)
+               goto out_pci_release;
+
+       netdev = alloc_etherdev(sizeof(*alx));
+       if (!netdev) {
+               err = -ENOMEM;
+               goto out_pci_release;
+       }
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       alx = netdev_priv(netdev);
+       alx->dev = netdev;
+       alx->hw.pdev = pdev;
+       alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
+                         NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
+       hw = &alx->hw;
+       pci_set_drvdata(pdev, alx);
+
+       hw->hw_addr = pci_ioremap_bar(pdev, 0);
+       if (!hw->hw_addr) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -EIO;
+               goto out_free_netdev;
+       }
+
+       netdev->netdev_ops = &alx_netdev_ops;
+       SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+       netdev->irq = pdev->irq;
+       netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+
+       if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
+               pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+
+       err = alx_init_sw(alx);
+       if (err) {
+               dev_err(&pdev->dev, "net device private data init failed\n");
+               goto out_unmap;
+       }
+
+       alx_reset_pcie(hw);
+
+       phy_configured = alx_phy_configured(hw);
+
+       if (!phy_configured)
+               alx_reset_phy(hw);
+
+       err = alx_reset_mac(hw);
+       if (err) {
+               dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
+               goto out_unmap;
+       }
+
+       /* setup link to put it in a known good starting state */
+       if (!phy_configured) {
+               err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "failed to configure PHY speed/duplex (err=%d)\n",
+                               err);
+                       goto out_unmap;
+               }
+       }
+
+       netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+
+       if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
+               dev_warn(&pdev->dev,
+                        "Invalid permanent address programmed, using random one\n");
+               eth_hw_addr_random(netdev);
+               memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
+       }
+
+       memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
+       memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+       memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
+
+       hw->mdio.prtad = 0;
+       hw->mdio.mmds = 0;
+       hw->mdio.dev = netdev;
+       hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
+                               MDIO_SUPPORTS_C22 |
+                               MDIO_EMULATE_C22;
+       hw->mdio.mdio_read = alx_mdio_read;
+       hw->mdio.mdio_write = alx_mdio_write;
+
+       if (!alx_get_phy_info(hw)) {
+               dev_err(&pdev->dev, "failed to identify PHY\n");
+               err = -EIO;
+               goto out_unmap;
+       }
+
+       INIT_WORK(&alx->link_check_wk, alx_link_check);
+       INIT_WORK(&alx->reset_wk, alx_reset);
+       spin_lock_init(&alx->hw.mdio_lock);
+       spin_lock_init(&alx->irq_lock);
+
+       netif_carrier_off(netdev);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "register netdevice failed\n");
+               goto out_unmap;
+       }
+
+       device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
+
+       netdev_info(netdev,
+                   "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
+                   netdev->dev_addr);
+
+       return 0;
+
+out_unmap:
+       iounmap(hw->hw_addr);
+out_free_netdev:
+       free_netdev(netdev);
+out_pci_release:
+       pci_release_selected_regions(pdev, bars);
+out_pci_disable:
+       pci_disable_device(pdev);
+       return err;
+}
+
+static void alx_remove(struct pci_dev *pdev)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct alx_hw *hw = &alx->hw;
+
+       cancel_work_sync(&alx->link_check_wk);
+       cancel_work_sync(&alx->reset_wk);
+
+       /* restore permanent mac address */
+       alx_set_macaddr(hw, hw->perm_addr);
+
+       unregister_netdev(alx->dev);
+       iounmap(hw->hw_addr);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
+
+       pci_disable_pcie_error_reporting(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       free_netdev(alx->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int alx_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int err;
+       bool wol_en;
+
+       err = __alx_shutdown(pdev, &wol_en);
+       if (err) {
+               dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
+               return err;
+       }
+
+       if (wol_en) {
+               pci_prepare_to_sleep(pdev);
+       } else {
+               pci_wake_from_d3(pdev, false);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+
+       return 0;
+}
+
+static int alx_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+       struct alx_hw *hw = &alx->hw;
+       int err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_save_state(pdev);
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       hw->link_speed = SPEED_UNKNOWN;
+       alx->int_mask = ALX_ISR_MISC;
+
+       alx_reset_pcie(hw);
+       alx_reset_phy(hw);
+
+       err = alx_reset_mac(hw);
+       if (err) {
+               netif_err(alx, hw, alx->dev,
+                         "resume:reset_mac fail %d\n", err);
+               return -EIO;
+       }
+
+       err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+       if (err) {
+               netif_err(alx, hw, alx->dev,
+                         "resume:setup_speed_duplex fail %d\n", err);
+               return -EIO;
+       }
+
+       if (netif_running(netdev)) {
+               err = __alx_open(alx, true);
+               if (err)
+                       return err;
+       }
+
+       netif_device_attach(netdev);
+
+       return err;
+}
+#endif
+
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+                                              pci_channel_state_t state)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+       pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
+
+       dev_info(&pdev->dev, "pci error detected\n");
+
+       rtnl_lock();
+
+       if (netif_running(netdev)) {
+               netif_device_detach(netdev);
+               alx_halt(alx);
+       }
+
+       if (state == pci_channel_io_perm_failure)
+               rc = PCI_ERS_RESULT_DISCONNECT;
+       else
+               pci_disable_device(pdev);
+
+       rtnl_unlock();
+
+       return rc;
+}
+
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct alx_hw *hw = &alx->hw;
+       pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+       dev_info(&pdev->dev, "pci error slot reset\n");
+
+       rtnl_lock();
+
+       if (pci_enable_device(pdev)) {
+               dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
+               goto out;
+       }
+
+       pci_set_master(pdev);
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       alx_reset_pcie(hw);
+       if (!alx_reset_mac(hw))
+               rc = PCI_ERS_RESULT_RECOVERED;
+out:
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+
+       rtnl_unlock();
+
+       return rc;
+}
+
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+
+       dev_info(&pdev->dev, "pci error resume\n");
+
+       rtnl_lock();
+
+       if (netif_running(netdev)) {
+               alx_activate(alx);
+               netif_device_attach(netdev);
+       }
+
+       rtnl_unlock();
+}
+
+static const struct pci_error_handlers alx_err_handlers = {
+       .error_detected = alx_pci_error_detected,
+       .slot_reset     = alx_pci_error_slot_reset,
+       .resume         = alx_pci_error_resume,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS      (&alx_pm_ops)
+#else
+#define ALX_PM_OPS      NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
+       {}
+};
+
+static struct pci_driver alx_driver = {
+       .name        = alx_drv_name,
+       .id_table    = alx_pci_tbl,
+       .probe       = alx_probe,
+       .remove      = alx_remove,
+       .shutdown    = alx_shutdown,
+       .err_handler = &alx_err_handlers,
+       .driver.pm   = ALX_PM_OPS,
+};
+
+module_pci_driver(alx_driver);
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION(
+       "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644 (file)
index 0000000..e4358c9
--- /dev/null
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_REG_H
+#define ALX_REG_H
+
+#define ALX_DEV_ID_AR8161                              0x1091
+#define ALX_DEV_ID_E2200                               0xe091
+#define ALX_DEV_ID_AR8162                              0x1090
+#define ALX_DEV_ID_AR8171                              0x10A1
+#define ALX_DEV_ID_AR8172                              0x10A0
+
+/* rev definition,
+ * bit(0): with xD support
+ * bit(1): with Card Reader function
+ * bit(7:2): real revision
+ */
+#define ALX_PCI_REVID_SHIFT                            3
+#define ALX_REV_A0                                     0
+#define ALX_REV_A1                                     1
+#define ALX_REV_B0                                     2
+#define ALX_REV_C0                                     3
+
+#define ALX_DEV_CTRL                                   0x0060
+#define ALX_DEV_CTRL_MAXRRS_MIN                                2
+
+#define ALX_MSIX_MASK                                  0x0090
+
+#define ALX_UE_SVRT                                    0x010C
+#define ALX_UE_SVRT_FCPROTERR                          BIT(13)
+#define ALX_UE_SVRT_DLPROTERR                          BIT(4)
+
+/* eeprom & flash load register */
+#define ALX_EFLD                                       0x0204
+#define ALX_EFLD_F_EXIST                               BIT(10)
+#define ALX_EFLD_E_EXIST                               BIT(9)
+#define ALX_EFLD_STAT                                  BIT(5)
+#define ALX_EFLD_START                                 BIT(0)
+
+/* eFuse load register */
+#define ALX_SLD                                                0x0218
+#define ALX_SLD_STAT                                   BIT(12)
+#define ALX_SLD_START                                  BIT(11)
+#define ALX_SLD_MAX_TO                                 100
+
+#define ALX_PDLL_TRNS1                                 0x1104
+#define ALX_PDLL_TRNS1_D3PLLOFF_EN                     BIT(11)
+
+#define ALX_PMCTRL                                     0x12F8
+#define ALX_PMCTRL_HOTRST_WTEN                         BIT(31)
+/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
+#define ALX_PMCTRL_ASPM_FCEN                           BIT(30)
+#define ALX_PMCTRL_SADLY_EN                            BIT(29)
+#define ALX_PMCTRL_LCKDET_TIMER_MASK                   0xF
+#define ALX_PMCTRL_LCKDET_TIMER_SHIFT                  24
+#define ALX_PMCTRL_LCKDET_TIMER_DEF                    0xC
+/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
+#define ALX_PMCTRL_L1REQ_TO_MASK                       0xF
+#define ALX_PMCTRL_L1REQ_TO_SHIFT                      20
+#define ALX_PMCTRL_L1REG_TO_DEF                                0xF
+#define ALX_PMCTRL_TXL1_AFTER_L0S                      BIT(19)
+#define ALX_PMCTRL_L1_TIMER_MASK                       0x7
+#define ALX_PMCTRL_L1_TIMER_SHIFT                      16
+#define ALX_PMCTRL_L1_TIMER_16US                       4
+#define ALX_PMCTRL_RCVR_WT_1US                         BIT(15)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN                         BIT(13)
+#define ALX_PMCTRL_L0S_EN                              BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S                      BIT(11)
+#define ALX_PMCTRL_L1_BUFSRX_EN                                BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD                       BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN                       BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN                          BIT(4)
+#define ALX_PMCTRL_L1_EN                               BIT(3)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER                                     0x1400
+/* bit12: 1:always select pclk from serdes, do not switch to 25M */
+#define ALX_MASTER_PCLKSEL_SRDS                                BIT(12)
+/* bit11: irq moderation for rx */
+#define ALX_MASTER_IRQMOD2_EN                          BIT(11)
+/* bit10: irq moderation for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN                          BIT(10)
+#define ALX_MASTER_SYSALVTIMER_EN                      BIT(7)
+#define ALX_MASTER_OOB_DIS                             BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M                           BIT(5)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST                         BIT(0)
+#define ALX_DMA_MAC_RST_TO                             50
+
+#define ALX_IRQ_MODU_TIMER                             0x1408
+#define ALX_IRQ_MODU_TIMER1_MASK                       0xFFFF
+#define ALX_IRQ_MODU_TIMER1_SHIFT                      0
+
+#define ALX_PHY_CTRL                                   0x140C
+#define ALX_PHY_CTRL_100AB_EN                          BIT(17)
+/* bit14: affect MAC & PHY, go to low power sts */
+#define ALX_PHY_CTRL_POWER_DOWN                                BIT(14)
+/* bit13: 1:pll always ON, 0:can switch in lpw */
+#define ALX_PHY_CTRL_PLL_ON                            BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG                                BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE                         BIT(11)
+#define ALX_PHY_CTRL_HIB_EN                            BIT(10)
+#define ALX_PHY_CTRL_IDDQ                              BIT(7)
+#define ALX_PHY_CTRL_GATE_25M                          BIT(5)
+#define ALX_PHY_CTRL_LED_MODE                          BIT(2)
+/* bit0: out of dsp RST state */
+#define ALX_PHY_CTRL_DSPRST_OUT                                BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO                         80
+#define ALX_PHY_CTRL_CLS       (ALX_PHY_CTRL_LED_MODE | \
+                                ALX_PHY_CTRL_100AB_EN | \
+                                ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS                                    0x1410
+#define ALX_MAC_STS_TXQ_BUSY                           BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY                           BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY                         BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY                         BIT(0)
+#define ALX_MAC_STS_IDLE       (ALX_MAC_STS_TXQ_BUSY | \
+                                ALX_MAC_STS_RXQ_BUSY | \
+                                ALX_MAC_STS_TXMAC_BUSY | \
+                                ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO                                       0x1414
+#define ALX_MDIO_MODE_EXT                              BIT(30)
+#define ALX_MDIO_BUSY                                  BIT(27)
+#define ALX_MDIO_CLK_SEL_MASK                          0x7
+#define ALX_MDIO_CLK_SEL_SHIFT                         24
+#define ALX_MDIO_CLK_SEL_25MD4                         0
+#define ALX_MDIO_CLK_SEL_25MD128                       7
+#define ALX_MDIO_START                                 BIT(23)
+#define ALX_MDIO_SPRES_PRMBL                           BIT(22)
+/* bit21: 1:read,0:write */
+#define ALX_MDIO_OP_READ                               BIT(21)
+#define ALX_MDIO_REG_MASK                              0x1F
+#define ALX_MDIO_REG_SHIFT                             16
+#define ALX_MDIO_DATA_MASK                             0xFFFF
+#define ALX_MDIO_DATA_SHIFT                            0
+#define ALX_MDIO_MAX_AC_TO                             120
+
+#define ALX_MDIO_EXTN                                  0x1448
+#define ALX_MDIO_EXTN_DEVAD_MASK                       0x1F
+#define ALX_MDIO_EXTN_DEVAD_SHIFT                      16
+#define ALX_MDIO_EXTN_REG_MASK                         0xFFFF
+#define ALX_MDIO_EXTN_REG_SHIFT                                0
+
+#define ALX_SERDES                                     0x1424
+#define ALX_SERDES_PHYCLK_SLWDWN                       BIT(18)
+#define ALX_SERDES_MACCLK_SLWDWN                       BIT(17)
+
+#define ALX_LPI_CTRL                                   0x1440
+#define ALX_LPI_CTRL_EN                                        BIT(0)
+
+/* for B0+, bit[13..] for C0+ */
+#define ALX_HRTBT_EXT_CTRL                             0x1AD0
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK            0x3F
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT           24
+#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN         BIT(23)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED           BIT(22)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED           BIT(21)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN         BIT(20)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN             BIT(19)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023              BIT(18)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6              BIT(17)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN         BIT(16)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN             BIT(15)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023              BIT(14)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6              BIT(13)
+#define ALX_HRTBT_EXT_CTRL_NS_EN                       BIT(12)
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK               0xFF
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT              4
+#define ALX_HRTBT_EXT_CTRL_IS_8023                     BIT(3)
+#define ALX_HRTBT_EXT_CTRL_IS_IPV6                     BIT(2)
+#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN                   BIT(1)
+#define ALX_HRTBT_EXT_CTRL_ARP_EN                      BIT(0)
+
+#define ALX_HRTBT_REM_IPV4_ADDR                                0x1AD4
+#define ALX_HRTBT_HOST_IPV4_ADDR                       0x1478
+#define ALX_HRTBT_REM_IPV6_ADDR3                       0x1AD8
+#define ALX_HRTBT_REM_IPV6_ADDR2                       0x1ADC
+#define ALX_HRTBT_REM_IPV6_ADDR1                       0x1AE0
+#define ALX_HRTBT_REM_IPV6_ADDR0                       0x1AE4
+
+/* 1B8C ~ 1B94 for C0+ */
+#define ALX_SWOI_ACER_CTRL                             0x1B8C
+#define ALX_SWOI_ORIG_ACK_NAK_EN                       BIT(20)
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK             0XFF
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT            12
+#define ALX_SWOI_ORIG_ACK_ADDR_MASK                    0XFFF
+#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT                   0
+
+#define ALX_SWOI_IOAC_CTRL_2                           0x1B90
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK      0xFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT     24
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK       0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT      12
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK      0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT     0
+
+#define ALX_SWOI_IOAC_CTRL_3                           0x1B94
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK      0xFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT     24
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK       0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT      12
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK      0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT     0
+
+/* for B0 */
+#define ALX_IDLE_DECISN_TIMER                          0x1474
+/* 1ms */
+#define ALX_IDLE_DECISN_TIMER_DEF                      0x400
+
+#define ALX_MAC_CTRL                                   0x1480
+#define ALX_MAC_CTRL_FAST_PAUSE                                BIT(31)
+#define ALX_MAC_CTRL_WOLSPED_SWEN                      BIT(30)
+/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
+#define ALX_MAC_CTRL_MHASH_ALG_HI5B                    BIT(29)
+#define ALX_MAC_CTRL_BRD_EN                            BIT(26)
+#define ALX_MAC_CTRL_MULTIALL_EN                       BIT(25)
+#define ALX_MAC_CTRL_SPEED_MASK                                0x3
+#define ALX_MAC_CTRL_SPEED_SHIFT                       20
+#define ALX_MAC_CTRL_SPEED_10_100                      1
+#define ALX_MAC_CTRL_SPEED_1000                                2
+#define ALX_MAC_CTRL_PROMISC_EN                                BIT(15)
+#define ALX_MAC_CTRL_VLANSTRIP                         BIT(14)
+#define ALX_MAC_CTRL_PRMBLEN_MASK                      0xF
+#define ALX_MAC_CTRL_PRMBLEN_SHIFT                     10
+#define ALX_MAC_CTRL_PCRCE                             BIT(7)
+#define ALX_MAC_CTRL_CRCE                              BIT(6)
+#define ALX_MAC_CTRL_FULLD                             BIT(5)
+#define ALX_MAC_CTRL_RXFC_EN                           BIT(3)
+#define ALX_MAC_CTRL_TXFC_EN                           BIT(2)
+#define ALX_MAC_CTRL_RX_EN                             BIT(1)
+#define ALX_MAC_CTRL_TX_EN                             BIT(0)
+
+#define ALX_STAD0                                      0x1488
+#define ALX_STAD1                                      0x148C
+
+#define ALX_HASH_TBL0                                  0x1490
+#define ALX_HASH_TBL1                                  0x1494
+
+#define ALX_MTU                                                0x149C
+#define ALX_MTU_JUMBO_TH                               1514
+#define ALX_MTU_STD_ALGN                               1536
+
+#define ALX_SRAM5                                      0x1524
+#define ALX_SRAM_RXF_LEN_MASK                          0xFFF
+#define ALX_SRAM_RXF_LEN_SHIFT                         0
+#define ALX_SRAM_RXF_LEN_8K                            (8*1024)
+
+#define ALX_SRAM9                                      0x1534
+#define ALX_SRAM_LOAD_PTR                              BIT(0)
+
+#define ALX_RX_BASE_ADDR_HI                            0x1540
+
+#define ALX_TX_BASE_ADDR_HI                            0x1544
+
+#define ALX_RFD_ADDR_LO                                        0x1550
+#define ALX_RFD_RING_SZ                                        0x1560
+#define ALX_RFD_BUF_SZ                                 0x1564
+
+#define ALX_RRD_ADDR_LO                                        0x1568
+#define ALX_RRD_RING_SZ                                        0x1578
+
+/* pri3: highest, pri0: lowest */
+#define ALX_TPD_PRI3_ADDR_LO                           0x14E4
+#define ALX_TPD_PRI2_ADDR_LO                           0x14E0
+#define ALX_TPD_PRI1_ADDR_LO                           0x157C
+#define ALX_TPD_PRI0_ADDR_LO                           0x1580
+
+/* producer index is 16bit */
+#define ALX_TPD_PRI3_PIDX                              0x1618
+#define ALX_TPD_PRI2_PIDX                              0x161A
+#define ALX_TPD_PRI1_PIDX                              0x15F0
+#define ALX_TPD_PRI0_PIDX                              0x15F2
+
+/* consumer index is 16bit */
+#define ALX_TPD_PRI3_CIDX                              0x161C
+#define ALX_TPD_PRI2_CIDX                              0x161E
+#define ALX_TPD_PRI1_CIDX                              0x15F4
+#define ALX_TPD_PRI0_CIDX                              0x15F6
+
+#define ALX_TPD_RING_SZ                                        0x1584
+
+#define ALX_TXQ0                                       0x1590
+#define ALX_TXQ0_TXF_BURST_PREF_MASK                   0xFFFF
+#define ALX_TXQ0_TXF_BURST_PREF_SHIFT                  16
+#define ALX_TXQ_TXF_BURST_PREF_DEF                     0x200
+#define ALX_TXQ0_LSO_8023_EN                           BIT(7)
+#define ALX_TXQ0_MODE_ENHANCE                          BIT(6)
+#define ALX_TXQ0_EN                                    BIT(5)
+#define ALX_TXQ0_SUPT_IPOPT                            BIT(4)
+#define ALX_TXQ0_TPD_BURSTPREF_MASK                    0xF
+#define ALX_TXQ0_TPD_BURSTPREF_SHIFT                   0
+#define ALX_TXQ_TPD_BURSTPREF_DEF                      5
+
+#define ALX_TXQ1                                       0x1594
+/* bit11:  drop large packet, len > (rfd buf) */
+#define ALX_TXQ1_ERRLGPKT_DROP_EN                      BIT(11)
+#define ALX_TXQ1_JUMBO_TSO_TH                          (7*1024)
+
+#define ALX_RXQ0                                       0x15A0
+#define ALX_RXQ0_EN                                    BIT(31)
+#define ALX_RXQ0_RSS_HASH_EN                           BIT(29)
+#define ALX_RXQ0_RSS_MODE_MASK                         0x3
+#define ALX_RXQ0_RSS_MODE_SHIFT                                26
+#define ALX_RXQ0_RSS_MODE_DIS                          0
+#define ALX_RXQ0_RSS_MODE_MQMI                         3
+#define ALX_RXQ0_NUM_RFD_PREF_MASK                     0x3F
+#define ALX_RXQ0_NUM_RFD_PREF_SHIFT                    20
+#define ALX_RXQ0_NUM_RFD_PREF_DEF                      8
+#define ALX_RXQ0_IDT_TBL_SIZE_MASK                     0x1FF
+#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT                    8
+#define ALX_RXQ0_IDT_TBL_SIZE_DEF                      0x100
+#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL                   128
+#define ALX_RXQ0_IPV6_PARSE_EN                         BIT(7)
+#define ALX_RXQ0_RSS_HSTYP_MASK                                0xF
+#define ALX_RXQ0_RSS_HSTYP_SHIFT                       2
+#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN                 BIT(5)
+#define ALX_RXQ0_RSS_HSTYP_IPV6_EN                     BIT(4)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN                 BIT(3)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_EN                     BIT(2)
+#define ALX_RXQ0_RSS_HSTYP_ALL         (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
+                                        ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
+                                        ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
+                                        ALX_RXQ0_RSS_HSTYP_IPV4_EN)
+#define ALX_RXQ0_ASPM_THRESH_MASK                      0x3
+#define ALX_RXQ0_ASPM_THRESH_SHIFT                     0
+#define ALX_RXQ0_ASPM_THRESH_100M                      3
+
+#define ALX_RXQ2                                       0x15A8
+#define ALX_RXQ2_RXF_XOFF_THRESH_MASK                  0xFFF
+#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT                 16
+#define ALX_RXQ2_RXF_XON_THRESH_MASK                   0xFFF
+#define ALX_RXQ2_RXF_XON_THRESH_SHIFT                  0
+/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ *        rx-packet(1522) + delay-of-link(64)
+ *      = 3212.
+ */
+#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD                    3212
+
+#define ALX_DMA                                                0x15C0
+#define ALX_DMA_RCHNL_SEL_MASK                         0x3
+#define ALX_DMA_RCHNL_SEL_SHIFT                                26
+#define ALX_DMA_WDLY_CNT_MASK                          0xF
+#define ALX_DMA_WDLY_CNT_SHIFT                         16
+#define ALX_DMA_WDLY_CNT_DEF                           4
+#define ALX_DMA_RDLY_CNT_MASK                          0x1F
+#define ALX_DMA_RDLY_CNT_SHIFT                         11
+#define ALX_DMA_RDLY_CNT_DEF                           15
+/* bit10: 0:tpd with pri, 1: data */
+#define ALX_DMA_RREQ_PRI_DATA                          BIT(10)
+#define ALX_DMA_RREQ_BLEN_MASK                         0x7
+#define ALX_DMA_RREQ_BLEN_SHIFT                                4
+#define ALX_DMA_RORDER_MODE_MASK                       0x7
+#define ALX_DMA_RORDER_MODE_SHIFT                      0
+#define ALX_DMA_RORDER_MODE_OUT                                4
+
+#define ALX_WOL0                                       0x14A0
+#define ALX_WOL0_PME_LINK                              BIT(5)
+#define ALX_WOL0_LINK_EN                               BIT(4)
+#define ALX_WOL0_PME_MAGIC_EN                          BIT(3)
+#define ALX_WOL0_MAGIC_EN                              BIT(2)
+
+#define ALX_RFD_PIDX                                   0x15E0
+
+#define ALX_RFD_CIDX                                   0x15F8
+
+/* MIB */
+#define ALX_MIB_BASE                                   0x1700
+#define ALX_MIB_RX_OK                                  (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_ERRADDR                             (ALX_MIB_BASE + 92)
+#define ALX_MIB_TX_OK                                  (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_MCCNT                               (ALX_MIB_BASE + 192)
+
+#define ALX_RX_STATS_BIN                               ALX_MIB_RX_OK
+#define ALX_RX_STATS_END                               ALX_MIB_RX_ERRADDR
+#define ALX_TX_STATS_BIN                               ALX_MIB_TX_OK
+#define ALX_TX_STATS_END                               ALX_MIB_TX_MCCNT
+
+#define ALX_ISR                                                0x1600
+#define ALX_ISR_DIS                                    BIT(31)
+#define ALX_ISR_RX_Q7                                  BIT(30)
+#define ALX_ISR_RX_Q6                                  BIT(29)
+#define ALX_ISR_RX_Q5                                  BIT(28)
+#define ALX_ISR_RX_Q4                                  BIT(27)
+#define ALX_ISR_PCIE_LNKDOWN                           BIT(26)
+#define ALX_ISR_RX_Q3                                  BIT(19)
+#define ALX_ISR_RX_Q2                                  BIT(18)
+#define ALX_ISR_RX_Q1                                  BIT(17)
+#define ALX_ISR_RX_Q0                                  BIT(16)
+#define ALX_ISR_TX_Q0                                  BIT(15)
+#define ALX_ISR_PHY                                    BIT(12)
+#define ALX_ISR_DMAW                                   BIT(10)
+#define ALX_ISR_DMAR                                   BIT(9)
+#define ALX_ISR_TXF_UR                                 BIT(8)
+#define ALX_ISR_TX_Q3                                  BIT(7)
+#define ALX_ISR_TX_Q2                                  BIT(6)
+#define ALX_ISR_TX_Q1                                  BIT(5)
+#define ALX_ISR_RFD_UR                                 BIT(4)
+#define ALX_ISR_RXF_OV                                 BIT(3)
+#define ALX_ISR_MANU                                   BIT(2)
+#define ALX_ISR_TIMER                                  BIT(1)
+#define ALX_ISR_SMB                                    BIT(0)
+
+#define ALX_IMR                                                0x1604
+
+/* re-send assert msg if SW does not respond */
+#define ALX_INT_RETRIG                                 0x1608
+/* 40ms */
+#define ALX_INT_RETRIG_TO                              20000
+
+#define ALX_SMB_TIMER                                  0x15C4
+
+#define ALX_TINT_TPD_THRSHLD                           0x15C8
+
+#define ALX_TINT_TIMER                                 0x15CC
+
+#define ALX_CLK_GATE                                   0x1814
+#define ALX_CLK_GATE_RXMAC                             BIT(5)
+#define ALX_CLK_GATE_TXMAC                             BIT(4)
+#define ALX_CLK_GATE_RXQ                               BIT(3)
+#define ALX_CLK_GATE_TXQ                               BIT(2)
+#define ALX_CLK_GATE_DMAR                              BIT(1)
+#define ALX_CLK_GATE_DMAW                              BIT(0)
+#define ALX_CLK_GATE_ALL               (ALX_CLK_GATE_RXMAC | \
+                                        ALX_CLK_GATE_TXMAC | \
+                                        ALX_CLK_GATE_RXQ | \
+                                        ALX_CLK_GATE_TXQ | \
+                                        ALX_CLK_GATE_DMAR | \
+                                        ALX_CLK_GATE_DMAW)
+
+/* interop between drivers */
+#define ALX_DRV                                                0x1804
+#define ALX_DRV_PHY_AUTO                               BIT(28)
+#define ALX_DRV_PHY_1000                               BIT(27)
+#define ALX_DRV_PHY_100                                        BIT(26)
+#define ALX_DRV_PHY_10                                 BIT(25)
+#define ALX_DRV_PHY_DUPLEX                             BIT(24)
+/* bit23: adv Pause */
+#define ALX_DRV_PHY_PAUSE                              BIT(23)
+/* bit22: adv Asym Pause */
+#define ALX_DRV_PHY_MASK                               0xFF
+#define ALX_DRV_PHY_SHIFT                              21
+#define ALX_DRV_PHY_UNKNOWN                            0
+
+/* flag indicating the phy has been initialized */
+#define ALX_PHY_INITED                                 0x003F
+
+/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
+#define ALX_WOL_CTRL2                                  0x1830
+#define ALX_WOL_CTRL2_DATA_STORE                       BIT(3)
+#define ALX_WOL_CTRL2_PTRN_EVT                         BIT(2)
+#define ALX_WOL_CTRL2_PME_PTRN_EN                      BIT(1)
+#define ALX_WOL_CTRL2_PTRN_EN                          BIT(0)
+
+#define ALX_WOL_CTRL3                                  0x1834
+#define ALX_WOL_CTRL3_PTRN_ADDR_MASK                   0xFFFFF
+#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT                  0
+
+#define ALX_WOL_CTRL4                                  0x1838
+#define ALX_WOL_CTRL4_PT15_MATCH                       BIT(31)
+#define ALX_WOL_CTRL4_PT14_MATCH                       BIT(30)
+#define ALX_WOL_CTRL4_PT13_MATCH                       BIT(29)
+#define ALX_WOL_CTRL4_PT12_MATCH                       BIT(28)
+#define ALX_WOL_CTRL4_PT11_MATCH                       BIT(27)
+#define ALX_WOL_CTRL4_PT10_MATCH                       BIT(26)
+#define ALX_WOL_CTRL4_PT9_MATCH                                BIT(25)
+#define ALX_WOL_CTRL4_PT8_MATCH                                BIT(24)
+#define ALX_WOL_CTRL4_PT7_MATCH                                BIT(23)
+#define ALX_WOL_CTRL4_PT6_MATCH                                BIT(22)
+#define ALX_WOL_CTRL4_PT5_MATCH                                BIT(21)
+#define ALX_WOL_CTRL4_PT4_MATCH                                BIT(20)
+#define ALX_WOL_CTRL4_PT3_MATCH                                BIT(19)
+#define ALX_WOL_CTRL4_PT2_MATCH                                BIT(18)
+#define ALX_WOL_CTRL4_PT1_MATCH                                BIT(17)
+#define ALX_WOL_CTRL4_PT0_MATCH                                BIT(16)
+#define ALX_WOL_CTRL4_PT15_EN                          BIT(15)
+#define ALX_WOL_CTRL4_PT14_EN                          BIT(14)
+#define ALX_WOL_CTRL4_PT13_EN                          BIT(13)
+#define ALX_WOL_CTRL4_PT12_EN                          BIT(12)
+#define ALX_WOL_CTRL4_PT11_EN                          BIT(11)
+#define ALX_WOL_CTRL4_PT10_EN                          BIT(10)
+#define ALX_WOL_CTRL4_PT9_EN                           BIT(9)
+#define ALX_WOL_CTRL4_PT8_EN                           BIT(8)
+#define ALX_WOL_CTRL4_PT7_EN                           BIT(7)
+#define ALX_WOL_CTRL4_PT6_EN                           BIT(6)
+#define ALX_WOL_CTRL4_PT5_EN                           BIT(5)
+#define ALX_WOL_CTRL4_PT4_EN                           BIT(4)
+#define ALX_WOL_CTRL4_PT3_EN                           BIT(3)
+#define ALX_WOL_CTRL4_PT2_EN                           BIT(2)
+#define ALX_WOL_CTRL4_PT1_EN                           BIT(1)
+#define ALX_WOL_CTRL4_PT0_EN                           BIT(0)
+
+#define ALX_WOL_CTRL5                                  0x183C
+#define ALX_WOL_CTRL5_PT3_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT3_LEN_SHIFT                    24
+#define ALX_WOL_CTRL5_PT2_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT2_LEN_SHIFT                    16
+#define ALX_WOL_CTRL5_PT1_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT1_LEN_SHIFT                    8
+#define ALX_WOL_CTRL5_PT0_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT0_LEN_SHIFT                    0
+
+#define ALX_WOL_CTRL6                                  0x1840
+#define ALX_WOL_CTRL5_PT7_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT7_LEN_SHIFT                    24
+#define ALX_WOL_CTRL5_PT6_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT6_LEN_SHIFT                    16
+#define ALX_WOL_CTRL5_PT5_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT5_LEN_SHIFT                    8
+#define ALX_WOL_CTRL5_PT4_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT4_LEN_SHIFT                    0
+
+#define ALX_WOL_CTRL7                                  0x1844
+#define ALX_WOL_CTRL5_PT11_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT11_LEN_SHIFT                   24
+#define ALX_WOL_CTRL5_PT10_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT10_LEN_SHIFT                   16
+#define ALX_WOL_CTRL5_PT9_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT9_LEN_SHIFT                    8
+#define ALX_WOL_CTRL5_PT8_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT8_LEN_SHIFT                    0
+
+#define ALX_WOL_CTRL8                                  0x1848
+#define ALX_WOL_CTRL5_PT15_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT15_LEN_SHIFT                   24
+#define ALX_WOL_CTRL5_PT14_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT14_LEN_SHIFT                   16
+#define ALX_WOL_CTRL5_PT13_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT13_LEN_SHIFT                   8
+#define ALX_WOL_CTRL5_PT12_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT12_LEN_SHIFT                   0
+
+#define ALX_ACER_FIXED_PTN0                            0x1850
+#define ALX_ACER_FIXED_PTN0_MASK                       0xFFFFFFFF
+#define ALX_ACER_FIXED_PTN0_SHIFT                      0
+
+#define ALX_ACER_FIXED_PTN1                            0x1854
+#define ALX_ACER_FIXED_PTN1_MASK                       0xFFFF
+#define ALX_ACER_FIXED_PTN1_SHIFT                      0
+
+#define ALX_ACER_RANDOM_NUM0                           0x1858
+#define ALX_ACER_RANDOM_NUM0_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM0_SHIFT                     0
+
+#define ALX_ACER_RANDOM_NUM1                           0x185C
+#define ALX_ACER_RANDOM_NUM1_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM1_SHIFT                     0
+
+#define ALX_ACER_RANDOM_NUM2                           0x1860
+#define ALX_ACER_RANDOM_NUM2_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM2_SHIFT                     0
+
+#define ALX_ACER_RANDOM_NUM3                           0x1864
+#define ALX_ACER_RANDOM_NUM3_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM3_SHIFT                     0
+
+#define ALX_ACER_MAGIC                                 0x1868
+#define ALX_ACER_MAGIC_EN                              BIT(31)
+#define ALX_ACER_MAGIC_PME_EN                          BIT(30)
+#define ALX_ACER_MAGIC_MATCH                           BIT(29)
+#define ALX_ACER_MAGIC_FF_CHECK                                BIT(10)
+#define ALX_ACER_MAGIC_RAN_LEN_MASK                    0x1F
+#define ALX_ACER_MAGIC_RAN_LEN_SHIFT                   5
+#define ALX_ACER_MAGIC_FIX_LEN_MASK                    0x1F
+#define ALX_ACER_MAGIC_FIX_LEN_SHIFT                   0
+
+#define ALX_ACER_TIMER                                 0x186C
+#define ALX_ACER_TIMER_EN                              BIT(31)
+#define ALX_ACER_TIMER_PME_EN                          BIT(30)
+#define ALX_ACER_TIMER_MATCH                           BIT(29)
+#define ALX_ACER_TIMER_THRES_MASK                      0x1FFFF
+#define ALX_ACER_TIMER_THRES_SHIFT                     0
+#define ALX_ACER_TIMER_THRES_DEF                       1
+
+/* RSS definitions */
+#define ALX_RSS_KEY0                                   0x14B0
+#define ALX_RSS_KEY1                                   0x14B4
+#define ALX_RSS_KEY2                                   0x14B8
+#define ALX_RSS_KEY3                                   0x14BC
+#define ALX_RSS_KEY4                                   0x14C0
+#define ALX_RSS_KEY5                                   0x14C4
+#define ALX_RSS_KEY6                                   0x14C8
+#define ALX_RSS_KEY7                                   0x14CC
+#define ALX_RSS_KEY8                                   0x14D0
+#define ALX_RSS_KEY9                                   0x14D4
+
+#define ALX_RSS_IDT_TBL0                               0x1B00
+
+#define ALX_MSI_MAP_TBL1                               0x15D0
+#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT                    20
+#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT                    16
+#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT                    12
+#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT                    8
+#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT                    4
+#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT                    0
+
+#define ALX_MSI_MAP_TBL2                               0x15D8
+#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT                    20
+#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT                    16
+#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT                    12
+#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT                    8
+#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT                    4
+#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT                    0
+
+#define ALX_MSI_ID_MAP                                 0x15D4
+
+#define ALX_MSI_RETRANS_TIMER                          0x1920
+/* bit16: 1:line,0:standard */
+#define ALX_MSI_MASK_SEL_LINE                          BIT(16)
+#define ALX_MSI_RETRANS_TM_MASK                                0xFFFF
+#define ALX_MSI_RETRANS_TM_SHIFT                       0
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define ALX_WRR                                                0x1938
+#define ALX_WRR_PRI_MASK                               0x3
+#define ALX_WRR_PRI_SHIFT                              29
+#define ALX_WRR_PRI_RESTRICT_NONE                      3
+#define ALX_WRR_PRI3_MASK                              0x1F
+#define ALX_WRR_PRI3_SHIFT                             24
+#define ALX_WRR_PRI2_MASK                              0x1F
+#define ALX_WRR_PRI2_SHIFT                             16
+#define ALX_WRR_PRI1_MASK                              0x1F
+#define ALX_WRR_PRI1_SHIFT                             8
+#define ALX_WRR_PRI0_MASK                              0x1F
+#define ALX_WRR_PRI0_SHIFT                             0
+
+#define ALX_HQTPD                                      0x193C
+#define ALX_HQTPD_BURST_EN                             BIT(31)
+#define ALX_HQTPD_Q3_NUMPREF_MASK                      0xF
+#define ALX_HQTPD_Q3_NUMPREF_SHIFT                     8
+#define ALX_HQTPD_Q2_NUMPREF_MASK                      0xF
+#define ALX_HQTPD_Q2_NUMPREF_SHIFT                     4
+#define ALX_HQTPD_Q1_NUMPREF_MASK                      0xF
+#define ALX_HQTPD_Q1_NUMPREF_SHIFT                     0
+
+#define ALX_MISC                                       0x19C0
+#define ALX_MISC_PSW_OCP_MASK                          0x7
+#define ALX_MISC_PSW_OCP_SHIFT                         21
+#define ALX_MISC_PSW_OCP_DEF                           0x7
+#define ALX_MISC_ISO_EN                                        BIT(12)
+#define ALX_MISC_INTNLOSC_OPEN                         BIT(3)
+
+#define ALX_MSIC2                                      0x19C8
+#define ALX_MSIC2_CALB_START                           BIT(0)
+
+#define ALX_MISC3                                      0x19CC
+/* bit1: 1:Software control 25M */
+#define ALX_MISC3_25M_BY_SW                            BIT(1)
+/* bit0: 25M switch to intnl OSC */
+#define ALX_MISC3_25M_NOTO_INTNL                       BIT(0)
+
+/* MSIX tbl in memory space */
+#define ALX_MSIX_ENTRY_BASE                            0x2000
+
+/********************* PHY regs definition ***************************/
+
+/* PHY Specific Status Register */
+#define ALX_MII_GIGA_PSSR                              0x11
+#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED                        0x0800
+#define ALX_GIGA_PSSR_DPLX                             0x2000
+#define ALX_GIGA_PSSR_SPEED                            0xC000
+#define ALX_GIGA_PSSR_10MBS                            0x0000
+#define ALX_GIGA_PSSR_100MBS                           0x4000
+#define ALX_GIGA_PSSR_1000MBS                          0x8000
+
+/* PHY Interrupt Enable Register */
+#define ALX_MII_IER                                    0x12
+#define ALX_IER_LINK_UP                                        0x0400
+#define ALX_IER_LINK_DOWN                              0x0800
+
+/* PHY Interrupt Status Register */
+#define ALX_MII_ISR                                    0x13
+
+#define ALX_MII_DBG_ADDR                               0x1D
+#define ALX_MII_DBG_DATA                               0x1E
+
+/***************************** debug port *************************************/
+
+#define ALX_MIIDBG_ANACTRL                             0x00
+#define ALX_ANACTRL_DEF                                        0x02EF
+
+#define ALX_MIIDBG_SYSMODCTRL                          0x04
+/* en half bias */
+#define ALX_SYSMODCTRL_IECHOADJ_DEF                    0xBB8B
+
+#define ALX_MIIDBG_SRDSYSMOD                           0x05
+#define ALX_SRDSYSMOD_DEEMP_EN                         0x0040
+#define ALX_SRDSYSMOD_DEF                              0x2C46
+
+#define ALX_MIIDBG_HIBNEG                              0x0B
+#define ALX_HIBNEG_PSHIB_EN                            0x8000
+#define ALX_HIBNEG_HIB_PSE                             0x1000
+#define ALX_HIBNEG_DEF                                 0xBC40
+#define ALX_HIBNEG_NOHIB       (ALX_HIBNEG_DEF & \
+                                ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
+
+#define ALX_MIIDBG_TST10BTCFG                          0x12
+#define ALX_TST10BTCFG_DEF                             0x4C04
+
+#define ALX_MIIDBG_AZ_ANADECT                          0x15
+#define ALX_AZ_ANADECT_DEF                             0x3220
+#define ALX_AZ_ANADECT_LONG                            0x3210
+
+#define ALX_MIIDBG_MSE16DB                             0x18
+#define ALX_MSE16DB_UP                                 0x05EA
+#define ALX_MSE16DB_DOWN                               0x02EA
+
+#define ALX_MIIDBG_MSE20DB                             0x1C
+#define ALX_MSE20DB_TH_MASK                            0x7F
+#define ALX_MSE20DB_TH_SHIFT                           2
+#define ALX_MSE20DB_TH_DEF                             0x2E
+#define ALX_MSE20DB_TH_HI                              0x54
+
+#define ALX_MIIDBG_AGC                                 0x23
+#define ALX_AGC_2_VGA_MASK                             0x3FU
+#define ALX_AGC_2_VGA_SHIFT                            8
+#define ALX_AGC_LONG1G_LIMT                            40
+#define ALX_AGC_LONG100M_LIMT                          44
+
+#define ALX_MIIDBG_LEGCYPS                             0x29
+#define ALX_LEGCYPS_EN                                 0x8000
+#define ALX_LEGCYPS_DEF                                        0x129D
+
+#define ALX_MIIDBG_TST100BTCFG                         0x36
+#define ALX_TST100BTCFG_DEF                            0xE12C
+
+#define ALX_MIIDBG_GREENCFG                            0x3B
+#define ALX_GREENCFG_DEF                               0x7078
+
+#define ALX_MIIDBG_GREENCFG2                           0x3D
+#define ALX_GREENCFG2_BP_GREEN                         0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN                     0x0080
+
+/******* dev 3 *********/
+#define ALX_MIIEXT_PCS                                 3
+
+#define ALX_MIIEXT_CLDCTRL3                            0x8003
+#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT                        0x8000
+
+#define ALX_MIIEXT_CLDCTRL5                            0x8005
+#define ALX_CLDCTRL5_BP_VD_HLFBIAS                     0x4000
+
+#define ALX_MIIEXT_CLDCTRL6                            0x8006
+#define ALX_CLDCTRL6_CAB_LEN_MASK                      0xFF
+#define ALX_CLDCTRL6_CAB_LEN_SHIFT                     0
+#define ALX_CLDCTRL6_CAB_LEN_SHORT1G                   116
+#define ALX_CLDCTRL6_CAB_LEN_SHORT100M                 152
+
+#define ALX_MIIEXT_VDRVBIAS                            0x8062
+#define ALX_VDRVBIAS_DEF                               0x3
+
+/********* dev 7 **********/
+#define ALX_MIIEXT_ANEG                                        7
+
+#define ALX_MIIEXT_LOCAL_EEEADV                                0x3C
+#define ALX_LOCAL_EEEADV_1000BT                                0x0004
+#define ALX_LOCAL_EEEADV_100BT                         0x0002
+
+#define ALX_MIIEXT_AFE                                 0x801A
+#define ALX_AFE_10BT_100M_TH                           0x0040
+
+#define ALX_MIIEXT_S3DIG10                             0x8023
+/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
+#define ALX_MIIEXT_S3DIG10_SL                          0x0001
+#define ALX_MIIEXT_S3DIG10_DEF                         0
+
+#define ALX_MIIEXT_NLP78                               0x8027
+#define ALX_MIIEXT_NLP78_120M_DEF                      0x8A05
+
+#endif
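Most multi-bit fields in the header above come as a _MASK/_SHIFT pair. A minimal sketch of the usual read-modify-write pattern built from those pairs (a generic illustration, not necessarily the driver's own helper macros):

static u32 example_set_lckdet_default(u32 pmctrl)
{
	/* read the current lock-detect timer field */
	u32 timer = (pmctrl >> ALX_PMCTRL_LCKDET_TIMER_SHIFT) &
		    ALX_PMCTRL_LCKDET_TIMER_MASK;

	if (timer != ALX_PMCTRL_LCKDET_TIMER_DEF) {
		/* clear the field and insert the default defined above */
		pmctrl &= ~(ALX_PMCTRL_LCKDET_TIMER_MASK <<
			    ALX_PMCTRL_LCKDET_TIMER_SHIFT);
		pmctrl |= ALX_PMCTRL_LCKDET_TIMER_DEF <<
			  ALX_PMCTRL_LCKDET_TIMER_SHIFT;
	}
	return pmctrl;
}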
index 0ba900762b138d87a3547a0a6d810bcfb151429d..786a87483298ea400733b987da389144e297824b 100644 (file)
@@ -2755,27 +2755,4 @@ static struct pci_driver atl1c_driver = {
        .driver.pm = &atl1c_pm_ops,
 };
 
-/**
- * atl1c_init_module - Driver Registration Routine
- *
- * atl1c_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- */
-static int __init atl1c_init_module(void)
-{
-       return pci_register_driver(&atl1c_driver);
-}
-
-/**
- * atl1c_exit_module - Driver Exit Cleanup Routine
- *
- * atl1c_exit_module is called just before the driver is removed
- * from memory.
- */
-static void __exit atl1c_exit_module(void)
-{
-       pci_unregister_driver(&atl1c_driver);
-}
-
-module_init(atl1c_init_module);
-module_exit(atl1c_exit_module);
+module_pci_driver(atl1c_driver);
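The conversion above is behavior-preserving: module_pci_driver() from <linux/pci.h> generates essentially the same init/exit boilerplate that the removed functions spelled out by hand, roughly:

static int __init atl1c_driver_init(void)
{
	return pci_register_driver(&atl1c_driver);
}
module_init(atl1c_driver_init);

static void __exit atl1c_driver_exit(void)
{
	pci_unregister_driver(&atl1c_driver);
}
module_exit(atl1c_driver_exit);

The same transformation is applied to atl1e and atl1 below.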
index 0688bb82b442e9cd55d5ce6da9d1a9f064295403..895f5377ad1ba2ad7819d162b8f1395d974111d7 100644 (file)
@@ -2489,27 +2489,4 @@ static struct pci_driver atl1e_driver = {
        .err_handler = &atl1e_err_handler
 };
 
-/**
- * atl1e_init_module - Driver Registration Routine
- *
- * atl1e_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- */
-static int __init atl1e_init_module(void)
-{
-       return pci_register_driver(&atl1e_driver);
-}
-
-/**
- * atl1e_exit_module - Driver Exit Cleanup Routine
- *
- * atl1e_exit_module is called just before the driver is removed
- * from memory.
- */
-static void __exit atl1e_exit_module(void)
-{
-       pci_unregister_driver(&atl1e_driver);
-}
-
-module_init(atl1e_init_module);
-module_exit(atl1e_exit_module);
+module_pci_driver(atl1e_driver);
index fa0915f3999b24c8d3d53d0efddad6ad7efb98d6..538211d6f7d9ad5fc806008a58d984750c91edcd 100644 (file)
@@ -3145,31 +3145,6 @@ static struct pci_driver atl1_driver = {
        .driver.pm = &atl1_pm_ops,
 };
 
-/**
- * atl1_exit_module - Driver Exit Cleanup Routine
- *
- * atl1_exit_module is called just before the driver is removed
- * from memory.
- */
-static void __exit atl1_exit_module(void)
-{
-       pci_unregister_driver(&atl1_driver);
-}
-
-/**
- * atl1_init_module - Driver Registration Routine
- *
- * atl1_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- */
-static int __init atl1_init_module(void)
-{
-       return pci_register_driver(&atl1_driver);
-}
-
-module_init(atl1_init_module);
-module_exit(atl1_exit_module);
-
 struct atl1_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
@@ -3705,3 +3680,5 @@ static const struct ethtool_ops atl1_ethtool_ops = {
        .get_ethtool_stats      = atl1_get_ethtool_stats,
        .get_sset_count         = atl1_get_sset_count,
 };
+
+module_pci_driver(atl1_driver);
index 3e69b3f88099ba931c9f6b18f4a71da82d182bdc..1d680baf43d620d2064395750a8ce545b2803126 100644 (file)
@@ -22,7 +22,6 @@ config B44
        tristate "Broadcom 440x/47xx ethernet support"
        depends on SSB_POSSIBLE && HAS_DMA
        select SSB
-       select NET_CORE
        select MII
        ---help---
          If you have a network (Ethernet) controller of this type, say Y
@@ -54,7 +53,6 @@ config B44_PCI
 config BCM63XX_ENET
        tristate "Broadcom 63xx internal mac support"
        depends on BCM63XX
-       select NET_CORE
        select MII
        select PHYLIB
        help
index 0b3e23ec37f769dde56f47f475b262c5c101f35d..b1bcd4ba47444e0f36c1bc4f9d8a2ff446ba5882 100644 (file)
@@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128;
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-/* io memory shared between all devices */
-static void __iomem *bcm_enet_shared_base;
+/* io register regions shared between all devices */
+static void __iomem *bcm_enet_shared_base[3];
 
 /*
  * io helpers to access mac registers
@@ -59,17 +59,76 @@ static inline void enet_writel(struct bcm_enet_priv *priv,
 }
 
 /*
- * io helpers to access shared registers
+ * io helpers to access switch registers
  */
+static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
+{
+       return bcm_readl(priv->base + off);
+}
+
+static inline void enetsw_writel(struct bcm_enet_priv *priv,
+                                u32 val, u32 off)
+{
+       bcm_writel(val, priv->base + off);
+}
+
+static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
+{
+       return bcm_readw(priv->base + off);
+}
+
+static inline void enetsw_writew(struct bcm_enet_priv *priv,
+                                u16 val, u32 off)
+{
+       bcm_writew(val, priv->base + off);
+}
+
+static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
+{
+       return bcm_readb(priv->base + off);
+}
+
+static inline void enetsw_writeb(struct bcm_enet_priv *priv,
+                                u8 val, u32 off)
+{
+       bcm_writeb(val, priv->base + off);
+}
+
+
+/* io helpers to access shared registers */
 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
 {
-       return bcm_readl(bcm_enet_shared_base + off);
+       return bcm_readl(bcm_enet_shared_base[0] + off);
 }
 
 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
 {
-       bcm_writel(val, bcm_enet_shared_base + off);
+       bcm_writel(val, bcm_enet_shared_base[0] + off);
+}
+
+static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
+{
+       return bcm_readl(bcm_enet_shared_base[1] +
+               bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
+}
+
+static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
+                                      u32 val, u32 off, int chan)
+{
+       bcm_writel(val, bcm_enet_shared_base[1] +
+               bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
+}
+
+static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
+{
+       return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
+}
+
+static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
+                                      u32 val, u32 off, int chan)
+{
+       bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
 }
 
 /*
@@ -196,7 +255,6 @@ static int bcm_enet_refill_rx(struct net_device *dev)
                        if (!skb)
                                break;
                        priv->rx_skb[desc_idx] = skb;
-
                        p = dma_map_single(&priv->pdev->dev, skb->data,
                                           priv->rx_skb_size,
                                           DMA_FROM_DEVICE);
@@ -206,7 +264,7 @@ static int bcm_enet_refill_rx(struct net_device *dev)
                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
-                       len_stat |= DMADESC_WRAP_MASK;
+                       len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
@@ -217,7 +275,10 @@ static int bcm_enet_refill_rx(struct net_device *dev)
                priv->rx_desc_count++;
 
                /* tell dma engine we allocated one buffer */
-               enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+               if (priv->dma_has_sram)
+                       enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+               else
+                       enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
        }
 
        /* If rx ring is still empty, set a timer to try allocating
@@ -293,13 +354,15 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 
                /* if the packet does not have start of packet _and_
                 * end of packet flag set, then just recycle it */
-               if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+               if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
+                       (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
                        dev->stats.rx_dropped++;
                        continue;
                }
 
                /* recycle packet if it's marked as bad */
-               if (unlikely(len_stat & DMADESC_ERR_MASK)) {
+               if (!priv->enet_is_sw &&
+                   unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;
 
                        if (len_stat & DMADESC_OVSIZE_MASK)
@@ -353,8 +416,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
                bcm_enet_refill_rx(dev);
 
                /* kick rx dma */
-               enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-                               ENETDMA_CHANCFG_REG(priv->rx_chan));
+               enet_dmac_writel(priv, priv->dma_chan_en_mask,
+                                        ENETDMAC_CHANCFG, priv->rx_chan);
        }
 
        return processed;
@@ -429,10 +492,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
        dev = priv->net_dev;
 
        /* ack interrupts */
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IR_REG(priv->rx_chan));
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IR_REG(priv->tx_chan));
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IR, priv->rx_chan);
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IR, priv->tx_chan);
 
        /* reclaim sent skb */
        tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -451,10 +514,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
        napi_complete(napi);
 
        /* restore rx/tx interrupt */
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IRMASK_REG(priv->rx_chan));
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IRMASK_REG(priv->tx_chan));
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IRMASK, priv->tx_chan);
 
        return rx_work_done;
 }
@@ -497,8 +560,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
        priv = netdev_priv(dev);
 
        /* mask rx/tx interrupts */
-       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
        napi_schedule(&priv->napi);
 
@@ -530,6 +593,26 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out_unlock;
        }
 
+       /* pad small packets sent on a switch device */
+       if (priv->enet_is_sw && skb->len < 64) {
+               int needed = 64 - skb->len;
+               char *data;
+
+               if (unlikely(skb_tailroom(skb) < needed)) {
+                       struct sk_buff *nskb;
+
+                       nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
+                       if (!nskb) {
+                               ret = NETDEV_TX_BUSY;
+                               goto out_unlock;
+                       }
+                       dev_kfree_skb(skb);
+                       skb = nskb;
+               }
+               data = skb_put(skb, needed);
+               memset(data, 0, needed);
+       }
+
        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;
@@ -539,14 +622,14 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                       DMA_TO_DEVICE);
 
        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
-       len_stat |= DMADESC_ESOP_MASK |
+       len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;
 
        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
-               len_stat |= DMADESC_WRAP_MASK;
+               len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
        }
        priv->tx_desc_count--;
 
@@ -557,8 +640,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        wmb();
 
        /* kick tx dma */
-       enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-                       ENETDMA_CHANCFG_REG(priv->tx_chan));
+       enet_dmac_writel(priv, priv->dma_chan_en_mask,
+                                ENETDMAC_CHANCFG, priv->tx_chan);
 
        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
@@ -686,6 +769,9 @@ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);
 
+       if (!priv->dma_has_sram)
+               return;
+
        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
@@ -833,8 +919,8 @@ static int bcm_enet_open(struct net_device *dev)
 
        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
-       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
@@ -909,8 +995,12 @@ static int bcm_enet_open(struct net_device *dev)
        priv->rx_curr_desc = 0;
 
        /* initialize flow control buffer allocation */
-       enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
-                       ENETDMA_BUFALLOC_REG(priv->rx_chan));
+       if (priv->dma_has_sram)
+               enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+                               ENETDMA_BUFALLOC_REG(priv->rx_chan));
+       else
+               enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+                               ENETDMAC_BUFALLOC, priv->rx_chan);
 
        if (bcm_enet_refill_rx(dev)) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -919,37 +1009,55 @@ static int bcm_enet_open(struct net_device *dev)
        }
 
        /* write rx & tx ring addresses */
-       enet_dma_writel(priv, priv->rx_desc_dma,
-                       ENETDMA_RSTART_REG(priv->rx_chan));
-       enet_dma_writel(priv, priv->tx_desc_dma,
-                       ENETDMA_RSTART_REG(priv->tx_chan));
+       if (priv->dma_has_sram) {
+               enet_dmas_writel(priv, priv->rx_desc_dma,
+                                ENETDMAS_RSTART_REG, priv->rx_chan);
+               enet_dmas_writel(priv, priv->tx_desc_dma,
+                        ENETDMAS_RSTART_REG, priv->tx_chan);
+       } else {
+               enet_dmac_writel(priv, priv->rx_desc_dma,
+                               ENETDMAC_RSTART, priv->rx_chan);
+               enet_dmac_writel(priv, priv->tx_desc_dma,
+                               ENETDMAC_RSTART, priv->tx_chan);
+       }
 
        /* clear remaining state ram for rx & tx channel */
-       enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+       if (priv->dma_has_sram) {
+               enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
+               enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
+               enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
+               enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
+               enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
+               enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
+       } else {
+               enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
+               enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
+       }
 
        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
 
        /* set dma maximum burst len */
-       enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-                       ENETDMA_MAXBURST_REG(priv->rx_chan));
-       enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-                       ENETDMA_MAXBURST_REG(priv->tx_chan));
+       enet_dmac_writel(priv, priv->dma_maxburst,
+                        ENETDMAC_MAXBURST, priv->rx_chan);
+       enet_dmac_writel(priv, priv->dma_maxburst,
+                        ENETDMAC_MAXBURST, priv->tx_chan);
 
        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
 
        /* set flow control low/high threshold to 1/3 / 2/3 */
-       val = priv->rx_ring_size / 3;
-       enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
-       val = (priv->rx_ring_size * 2) / 3;
-       enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+       if (priv->dma_has_sram) {
+               val = priv->rx_ring_size / 3;
+               enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+               val = (priv->rx_ring_size * 2) / 3;
+               enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+       } else {
+               enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
+               enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
+               enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
+       }
 
        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
@@ -958,26 +1066,26 @@ static int bcm_enet_open(struct net_device *dev)
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-       enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-                       ENETDMA_CHANCFG_REG(priv->rx_chan));
+       enet_dmac_writel(priv, priv->dma_chan_en_mask,
+                        ENETDMAC_CHANCFG, priv->rx_chan);
 
        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
        /* watch "packet transferred" interrupt in rx and tx */
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IR_REG(priv->rx_chan));
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IR_REG(priv->tx_chan));
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IR, priv->rx_chan);
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IR, priv->tx_chan);
 
        /* make sure we enable napi before rx interrupt  */
        napi_enable(&priv->napi);
 
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IRMASK_REG(priv->rx_chan));
-       enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-                       ENETDMA_IRMASK_REG(priv->tx_chan));
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, priv->dma_chan_int_mask,
+                        ENETDMAC_IRMASK, priv->tx_chan);
 
        if (priv->has_phy)
                phy_start(priv->phydev);
@@ -1057,14 +1165,14 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
 {
        int limit;
 
-       enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+       enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
 
        limit = 1000;
        do {
                u32 val;
 
-               val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
-               if (!(val & ENETDMA_CHANCFG_EN_MASK))
+               val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
+               if (!(val & ENETDMAC_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
@@ -1090,8 +1198,8 @@ static int bcm_enet_stop(struct net_device *dev)
 
        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
-       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-       enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 
        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);
@@ -1328,6 +1436,20 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
        mutex_unlock(&priv->mib_update_lock);
 }
 
+static int bcm_enet_nway_reset(struct net_device *dev)
+{
+       struct bcm_enet_priv *priv;
+
+       priv = netdev_priv(dev);
+       if (priv->has_phy) {
+               if (!priv->phydev)
+                       return -ENODEV;
+               return genphy_restart_aneg(priv->phydev);
+       }
+
+       return -EOPNOTSUPP;
+}
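
bcm_enet_nway_reset() just asks phylib to restart autonegotiation on the attached PHY; it is the hook behind ethtool -r. For illustration, and assuming nothing beyond the standard MII bit definitions, genphy_restart_aneg() boils down to something like the sketch below.

    /* rough equivalent of the phylib helper the driver delegates to */
    static int restart_aneg_sketch(struct phy_device *phydev)
    {
            int ctl = phy_read(phydev, MII_BMCR);

            if (ctl < 0)
                    return ctl;
            ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
            ctl &= ~BMCR_ISOLATE;   /* don't isolate the PHY while negotiating */
            return phy_write(phydev, MII_BMCR, ctl);
    }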
+
 static int bcm_enet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
 {
@@ -1470,6 +1592,7 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
        .get_strings            = bcm_enet_get_strings,
        .get_sset_count         = bcm_enet_get_sset_count,
        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
+       .nway_reset             = bcm_enet_nway_reset,
        .get_settings           = bcm_enet_get_settings,
        .set_settings           = bcm_enet_set_settings,
        .get_drvinfo            = bcm_enet_get_drvinfo,
@@ -1530,7 +1653,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
         * it's appended
         */
        priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
-                                 BCMENET_DMA_MAXBURST * 4);
+                                 priv->dma_maxburst * 4);
        return 0;
 }
 
@@ -1621,7 +1744,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 
        /* stop if shared driver failed, assume driver->probe will be
         * called in the same order we register devices (correct ?) */
-       if (!bcm_enet_shared_base)
+       if (!bcm_enet_shared_base[0])
                return -ENODEV;
 
        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1637,6 +1760,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
                return -ENOMEM;
        priv = netdev_priv(dev);
 
+       priv->enet_is_sw = false;
+       priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+
        ret = compute_hw_mtu(priv, dev->mtu);
        if (ret)
                goto out;
@@ -1687,6 +1813,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
                priv->pause_tx = pd->pause_tx;
                priv->force_duplex_full = pd->force_duplex_full;
                priv->force_speed_100 = pd->force_speed_100;
+               priv->dma_chan_en_mask = pd->dma_chan_en_mask;
+               priv->dma_chan_int_mask = pd->dma_chan_int_mask;
+               priv->dma_chan_width = pd->dma_chan_width;
+               priv->dma_has_sram = pd->dma_has_sram;
+               priv->dma_desc_shift = pd->dma_desc_shift;
        }
 
        if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
@@ -1847,7 +1978,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
        clk_disable_unprepare(priv->mac_clk);
        clk_put(priv->mac_clk);
 
-       platform_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return 0;
 }
@@ -1862,55 +1992,920 @@ struct platform_driver bcm63xx_enet_driver = {
 };
 
 /*
- * reserve & remap memory space shared between all macs
+ * switch mii access callbacks
  */
-static int bcm_enet_shared_probe(struct platform_device *pdev)
+static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
+                               int ext, int phy_id, int location)
 {
-       struct resource *res;
+       u32 reg;
+       int ret;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
+       spin_lock_bh(&priv->enetsw_mdio_lock);
+       enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
 
-       bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!bcm_enet_shared_base)
-               return -ENOMEM;
+       reg = ENETSW_MDIOC_RD_MASK |
+               (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
+               (location << ENETSW_MDIOC_REG_SHIFT);
 
-       return 0;
+       if (ext)
+               reg |= ENETSW_MDIOC_EXT_MASK;
+
+       enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
+       udelay(50);
+       ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
+       spin_unlock_bh(&priv->enetsw_mdio_lock);
+       return ret;
 }
 
-static int bcm_enet_shared_remove(struct platform_device *pdev)
+static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
+                                int ext, int phy_id, int location,
+                                uint16_t data)
 {
-       return 0;
+       u32 reg;
+
+       spin_lock_bh(&priv->enetsw_mdio_lock);
+       enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
+
+       reg = ENETSW_MDIOC_WR_MASK |
+               (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
+               (location << ENETSW_MDIOC_REG_SHIFT);
+
+       if (ext)
+               reg |= ENETSW_MDIOC_EXT_MASK;
+
+       reg |= data;
+
+       enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
+       udelay(50);
+       spin_unlock_bh(&priv->enetsw_mdio_lock);
+}
+
+static inline int bcm_enet_port_is_rgmii(int portid)
+{
+       return portid >= ENETSW_RGMII_PORT0;
 }
 
 /*
- * this "shared" driver is needed because both macs share a single
- * address space
+ * enet sw PHY polling
  */
-struct platform_driver bcm63xx_enet_shared_driver = {
-       .probe  = bcm_enet_shared_probe,
-       .remove = bcm_enet_shared_remove,
-       .driver = {
-               .name   = "bcm63xx_enet_shared",
-               .owner  = THIS_MODULE,
-       },
-};
+static void swphy_poll_timer(unsigned long data)
+{
+       struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
+       unsigned int i;
+
+       for (i = 0; i < priv->num_ports; i++) {
+               struct bcm63xx_enetsw_port *port;
+               int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+               int external_phy = bcm_enet_port_is_rgmii(i);
+               u8 override;
+
+               port = &priv->used_ports[i];
+               if (!port->used)
+                       continue;
+
+               if (port->bypass_link)
+                       continue;
+
+               /* dummy read to clear */
+               for (j = 0; j < 2; j++)
+                       val = bcmenet_sw_mdio_read(priv, external_phy,
+                                                  port->phy_id, MII_BMSR);
+
+               if (val == 0xffff)
+                       continue;
+
+               up = (val & BMSR_LSTATUS) ? 1 : 0;
+               if (!(up ^ priv->sw_port_link[i]))
+                       continue;
+
+               priv->sw_port_link[i] = up;
+
+               /* link changed */
+               if (!up) {
+                       dev_info(&priv->pdev->dev, "link DOWN on %s\n",
+                                port->name);
+                       enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
+                                     ENETSW_PORTOV_REG(i));
+                       enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
+                                     ENETSW_PTCTRL_TXDIS_MASK,
+                                     ENETSW_PTCTRL_REG(i));
+                       continue;
+               }
+
+               advertise = bcmenet_sw_mdio_read(priv, external_phy,
+                                                port->phy_id, MII_ADVERTISE);
+
+               lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
+                                          MII_LPA);
+
+               lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
+                                           MII_STAT1000);
+
+               /* figure out media and duplex from advertise and LPA values */
+               media = mii_nway_result(lpa & advertise);
+               duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+               if (lpa2 & LPA_1000FULL)
+                       duplex = 1;
+
+               if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
+                       speed = 1000;
+               else {
+                       if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+                               speed = 100;
+                       else
+                               speed = 10;
+               }
+
+               dev_info(&priv->pdev->dev,
+                        "link UP on %s, %dMbps, %s-duplex\n",
+                        port->name, speed, duplex ? "full" : "half");
+
+               override = ENETSW_PORTOV_ENABLE_MASK |
+                       ENETSW_PORTOV_LINKUP_MASK;
+
+               if (speed == 1000)
+                       override |= ENETSW_IMPOV_1000_MASK;
+               else if (speed == 100)
+                       override |= ENETSW_IMPOV_100_MASK;
+               if (duplex)
+                       override |= ENETSW_IMPOV_FDX_MASK;
+
+               enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
+               enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
+       }
+
+       priv->swphy_poll.expires = jiffies + HZ;
+       add_timer(&priv->swphy_poll);
+}
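
The link resolution above is the usual MII scheme: AND the local advertisement with the link partner ability word and let mii_nway_result() pick the highest common mode, with MII_STAT1000 checked separately for gigabit. A worked example with made-up register values:

    u16 advertise = ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL;
    u16 lpa       = ADVERTISE_100HALF | ADVERTISE_10FULL;
    int media     = mii_nway_result(lpa & advertise);
    /* the common abilities are 100HALF and 10FULL; mii_nway_result()
     * prefers 100HALF, so the port override is programmed for
     * 100 Mbit/s half duplex */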
 
 /*
- * entry point
+ * open callback, allocate dma rings & buffers and start rx operation
  */
-static int __init bcm_enet_init(void)
+static int bcm_enetsw_open(struct net_device *dev)
 {
-       int ret;
+       struct bcm_enet_priv *priv;
+       struct device *kdev;
+       int i, ret;
+       unsigned int size;
+       void *p;
+       u32 val;
 
-       ret = platform_driver_register(&bcm63xx_enet_shared_driver);
-       if (ret)
-               return ret;
+       priv = netdev_priv(dev);
+       kdev = &priv->pdev->dev;
 
-       ret = platform_driver_register(&bcm63xx_enet_driver);
+       /* mask all interrupts and request them */
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+       ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
+                         IRQF_DISABLED, dev->name, dev);
        if (ret)
-               platform_driver_unregister(&bcm63xx_enet_shared_driver);
+               goto out_freeirq;
+
+       if (priv->irq_tx != -1) {
+               ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
+                                 IRQF_DISABLED, dev->name, dev);
+               if (ret)
+                       goto out_freeirq_rx;
+       }
+
+       /* allocate rx dma ring */
+       size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
+       p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+       if (!p) {
+               dev_err(kdev, "cannot allocate rx ring %u\n", size);
+               ret = -ENOMEM;
+               goto out_freeirq_tx;
+       }
+
+       memset(p, 0, size);
+       priv->rx_desc_alloc_size = size;
+       priv->rx_desc_cpu = p;
+
+       /* allocate tx dma ring */
+       size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
+       p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+       if (!p) {
+               dev_err(kdev, "cannot allocate tx ring\n");
+               ret = -ENOMEM;
+               goto out_free_rx_ring;
+       }
+
+       memset(p, 0, size);
+       priv->tx_desc_alloc_size = size;
+       priv->tx_desc_cpu = p;
+
+       priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+                              GFP_KERNEL);
+       if (!priv->tx_skb) {
+               dev_err(kdev, "cannot allocate tx skb queue\n");
+               ret = -ENOMEM;
+               goto out_free_tx_ring;
+       }
+
+       priv->tx_desc_count = priv->tx_ring_size;
+       priv->tx_dirty_desc = 0;
+       priv->tx_curr_desc = 0;
+       spin_lock_init(&priv->tx_lock);
+
+       /* init & fill rx ring with skbs */
+       priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+                              GFP_KERNEL);
+       if (!priv->rx_skb) {
+               dev_err(kdev, "cannot allocate rx skb queue\n");
+               ret = -ENOMEM;
+               goto out_free_tx_skb;
+       }
+
+       priv->rx_desc_count = 0;
+       priv->rx_dirty_desc = 0;
+       priv->rx_curr_desc = 0;
+
+       /* disable all ports */
+       for (i = 0; i < priv->num_ports; i++) {
+               enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
+                             ENETSW_PORTOV_REG(i));
+               enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
+                             ENETSW_PTCTRL_TXDIS_MASK,
+                             ENETSW_PTCTRL_REG(i));
+
+               priv->sw_port_link[i] = 0;
+       }
+
+       /* reset mib */
+       val = enetsw_readb(priv, ENETSW_GMCR_REG);
+       val |= ENETSW_GMCR_RST_MIB_MASK;
+       enetsw_writeb(priv, val, ENETSW_GMCR_REG);
+       mdelay(1);
+       val &= ~ENETSW_GMCR_RST_MIB_MASK;
+       enetsw_writeb(priv, val, ENETSW_GMCR_REG);
+       mdelay(1);
+
+       /* force CPU port state */
+       val = enetsw_readb(priv, ENETSW_IMPOV_REG);
+       val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
+       enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
+
+       /* enable switch forward engine */
+       val = enetsw_readb(priv, ENETSW_SWMODE_REG);
+       val |= ENETSW_SWMODE_FWD_EN_MASK;
+       enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
+
+       /* enable jumbo on all ports */
+       enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
+       enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
+
+       /* initialize flow control buffer allocation */
+       enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+                       ENETDMA_BUFALLOC_REG(priv->rx_chan));
+
+       if (bcm_enet_refill_rx(dev)) {
+               dev_err(kdev, "cannot allocate rx skb queue\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* write rx & tx ring addresses */
+       enet_dmas_writel(priv, priv->rx_desc_dma,
+                        ENETDMAS_RSTART_REG, priv->rx_chan);
+       enet_dmas_writel(priv, priv->tx_desc_dma,
+                        ENETDMAS_RSTART_REG, priv->tx_chan);
+
+       /* clear remaining state ram for rx & tx channel */
+       enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
+       enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
+       enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
+       enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
+       enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
+       enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
+
+       /* set dma maximum burst len */
+       enet_dmac_writel(priv, priv->dma_maxburst,
+                        ENETDMAC_MAXBURST, priv->rx_chan);
+       enet_dmac_writel(priv, priv->dma_maxburst,
+                        ENETDMAC_MAXBURST, priv->tx_chan);
+
+       /* set flow control low/high threshold to 1/3 / 2/3 */
+       val = priv->rx_ring_size / 3;
+       enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+       val = (priv->rx_ring_size * 2) / 3;
+       enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+
+       /* all set, enable mac and interrupts, start dma engine and
+        * kick rx dma channel
+        */
+       wmb();
+       enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+       enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+                        ENETDMAC_CHANCFG, priv->rx_chan);
+
+       /* watch "packet transferred" interrupt in rx and tx */
+       enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+                        ENETDMAC_IR, priv->rx_chan);
+       enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+                        ENETDMAC_IR, priv->tx_chan);
+
+       /* make sure we enable napi before rx interrupt  */
+       napi_enable(&priv->napi);
+
+       enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+                        ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+                        ENETDMAC_IRMASK, priv->tx_chan);
+
+       netif_carrier_on(dev);
+       netif_start_queue(dev);
+
+       /* apply override config for bypass_link ports here. */
+       for (i = 0; i < priv->num_ports; i++) {
+               struct bcm63xx_enetsw_port *port;
+               u8 override;
+               port = &priv->used_ports[i];
+               if (!port->used)
+                       continue;
+
+               if (!port->bypass_link)
+                       continue;
+
+               override = ENETSW_PORTOV_ENABLE_MASK |
+                       ENETSW_PORTOV_LINKUP_MASK;
+
+               switch (port->force_speed) {
+               case 1000:
+                       override |= ENETSW_IMPOV_1000_MASK;
+                       break;
+               case 100:
+                       override |= ENETSW_IMPOV_100_MASK;
+                       break;
+               case 10:
+                       break;
+               default:
+                       pr_warn("invalid forced speed on port %s: assume 10\n",
+                              port->name);
+                       break;
+               }
+
+               if (port->force_duplex_full)
+                       override |= ENETSW_IMPOV_FDX_MASK;
+
+               enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
+               enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
+       }
+
+       /* start phy polling timer */
+       init_timer(&priv->swphy_poll);
+       priv->swphy_poll.function = swphy_poll_timer;
+       priv->swphy_poll.data = (unsigned long)priv;
+       priv->swphy_poll.expires = jiffies;
+       add_timer(&priv->swphy_poll);
+       return 0;
+
+out:
+       for (i = 0; i < priv->rx_ring_size; i++) {
+               struct bcm_enet_desc *desc;
+
+               if (!priv->rx_skb[i])
+                       continue;
+
+               desc = &priv->rx_desc_cpu[i];
+               dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+                                DMA_FROM_DEVICE);
+               kfree_skb(priv->rx_skb[i]);
+       }
+       kfree(priv->rx_skb);
+
+out_free_tx_skb:
+       kfree(priv->tx_skb);
+
+out_free_tx_ring:
+       dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+                         priv->tx_desc_cpu, priv->tx_desc_dma);
+
+out_free_rx_ring:
+       dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+                         priv->rx_desc_cpu, priv->rx_desc_dma);
+
+out_freeirq_tx:
+       if (priv->irq_tx != -1)
+               free_irq(priv->irq_tx, dev);
+
+out_freeirq_rx:
+       free_irq(priv->irq_rx, dev);
+
+out_freeirq:
+       return ret;
+}
+
+/* stop callback */
+static int bcm_enetsw_stop(struct net_device *dev)
+{
+       struct bcm_enet_priv *priv;
+       struct device *kdev;
+       int i;
+
+       priv = netdev_priv(dev);
+       kdev = &priv->pdev->dev;
+
+       del_timer_sync(&priv->swphy_poll);
+       netif_stop_queue(dev);
+       napi_disable(&priv->napi);
+       del_timer_sync(&priv->rx_timeout);
+
+       /* mask all interrupts */
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+       enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+       /* disable dma & mac */
+       bcm_enet_disable_dma(priv, priv->tx_chan);
+       bcm_enet_disable_dma(priv, priv->rx_chan);
+
+       /* force reclaim of all tx buffers */
+       bcm_enet_tx_reclaim(dev, 1);
+
+       /* free the rx skb ring */
+       for (i = 0; i < priv->rx_ring_size; i++) {
+               struct bcm_enet_desc *desc;
+
+               if (!priv->rx_skb[i])
+                       continue;
+
+               desc = &priv->rx_desc_cpu[i];
+               dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+                                DMA_FROM_DEVICE);
+               kfree_skb(priv->rx_skb[i]);
+       }
+
+       /* free remaining allocated memory */
+       kfree(priv->rx_skb);
+       kfree(priv->tx_skb);
+       dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+                         priv->rx_desc_cpu, priv->rx_desc_dma);
+       dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+                         priv->tx_desc_cpu, priv->tx_desc_dma);
+       if (priv->irq_tx != -1)
+               free_irq(priv->irq_tx, dev);
+       free_irq(priv->irq_rx, dev);
+
+       return 0;
+}
+
+/* try to sort out phy external status by walking the used_ports field
+ * in the bcm_enet_priv structure. In case the phy address is not
+ * assigned to any physical port on the switch, assume it is external
+ * (and yell at the user).
+ */
+static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
+{
+       int i;
+
+       for (i = 0; i < priv->num_ports; ++i) {
+               if (!priv->used_ports[i].used)
+                       continue;
+               if (priv->used_ports[i].phy_id == phy_id)
+                       return bcm_enet_port_is_rgmii(i);
+       }
+
+       printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
+                   phy_id);
+       return 1;
+}
+
+/* can't use bcmenet_sw_mdio_read directly as we need to sort out
+ * external/internal status of the given phy_id first.
+ */
+static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
+                                   int location)
+{
+       struct bcm_enet_priv *priv;
+
+       priv = netdev_priv(dev);
+       return bcmenet_sw_mdio_read(priv,
+                                   bcm_enetsw_phy_is_external(priv, phy_id),
+                                   phy_id, location);
+}
+
+/* can't use bcmenet_sw_mdio_write directly as we need to sort out
+ * external/internal status of the given phy_id first.
+ */
+static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
+                                     int location,
+                                     int val)
+{
+       struct bcm_enet_priv *priv;
+
+       priv = netdev_priv(dev);
+       bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
+                             phy_id, location, val);
+}
+
+static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct mii_if_info mii;
+
+       mii.dev = dev;
+       mii.mdio_read = bcm_enetsw_mii_mdio_read;
+       mii.mdio_write = bcm_enetsw_mii_mdio_write;
+       mii.phy_id = 0;
+       mii.phy_id_mask = 0x3f;
+       mii.reg_num_mask = 0x1f;
+       return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+}
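
Once hooked up as .ndo_do_ioctl below, this routes the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests through the switch MDIO accessors. A hypothetical userspace read of a port PHY's status register (interface name and PHY address are made up for the example):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/mii.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ifreq ifr;
            struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            mii->phy_id  = 1;               /* switch port PHY address */
            mii->reg_num = MII_BMSR;        /* basic mode status register */
            if (fd >= 0 && ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                    printf("BMSR: 0x%04x\n", mii->val_out);
            return 0;
    }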
+
+static const struct net_device_ops bcm_enetsw_ops = {
+       .ndo_open               = bcm_enetsw_open,
+       .ndo_stop               = bcm_enetsw_stop,
+       .ndo_start_xmit         = bcm_enet_start_xmit,
+       .ndo_change_mtu         = bcm_enet_change_mtu,
+       .ndo_do_ioctl           = bcm_enetsw_ioctl,
+};
+
+static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
+       { "rx_packets", DEV_STAT(rx_packets), -1 },
+       { "tx_packets", DEV_STAT(tx_packets), -1 },
+       { "rx_bytes", DEV_STAT(rx_bytes), -1 },
+       { "tx_bytes", DEV_STAT(tx_bytes), -1 },
+       { "rx_errors", DEV_STAT(rx_errors), -1 },
+       { "tx_errors", DEV_STAT(tx_errors), -1 },
+       { "rx_dropped", DEV_STAT(rx_dropped), -1 },
+       { "tx_dropped", DEV_STAT(tx_dropped), -1 },
+
+       { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
+       { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
+       { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
+       { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
+       { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
+       { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
+       { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
+       { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
+       { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
+       { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
+         ETHSW_MIB_RX_1024_1522 },
+       { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
+         ETHSW_MIB_RX_1523_2047 },
+       { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
+         ETHSW_MIB_RX_2048_4095 },
+       { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
+         ETHSW_MIB_RX_4096_8191 },
+       { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
+         ETHSW_MIB_RX_8192_9728 },
+       { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
+       { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
+       { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
+       { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
+       { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
+
+       { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
+       { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
+       { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
+       { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
+       { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
+       { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
+};
+
+#define BCM_ENETSW_STATS_LEN   \
+       (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
+
+static void bcm_enetsw_get_strings(struct net_device *netdev,
+                                  u32 stringset, u8 *data)
+{
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                              bcm_enetsw_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+               }
+               break;
+       }
+}
+
+static int bcm_enetsw_get_sset_count(struct net_device *netdev,
+                                    int string_set)
+{
+       switch (string_set) {
+       case ETH_SS_STATS:
+               return BCM_ENETSW_STATS_LEN;
+       default:
+               return -EINVAL;
+       }
+}
+
+static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
+                                  struct ethtool_drvinfo *drvinfo)
+{
+       strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
+       strncpy(drvinfo->version, bcm_enet_driver_version, 32);
+       strncpy(drvinfo->fw_version, "N/A", 32);
+       strncpy(drvinfo->bus_info, "bcm63xx", 32);
+       drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
+}
+
+static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
+                                        struct ethtool_stats *stats,
+                                        u64 *data)
+{
+       struct bcm_enet_priv *priv;
+       int i;
+
+       priv = netdev_priv(netdev);
+
+       for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+               const struct bcm_enet_stats *s;
+               u32 lo, hi;
+               char *p;
+               int reg;
+
+               s = &bcm_enetsw_gstrings_stats[i];
+
+               reg = s->mib_reg;
+               if (reg == -1)
+                       continue;
+
+               lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
+               p = (char *)priv + s->stat_offset;
+
+               if (s->sizeof_stat == sizeof(u64)) {
+                       hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
+                       *(u64 *)p = ((u64)hi << 32 | lo);
+               } else {
+                       *(u32 *)p = lo;
+               }
+       }
+
+       for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+               const struct bcm_enet_stats *s;
+               char *p;
+
+               s = &bcm_enetsw_gstrings_stats[i];
+
+               if (s->mib_reg == -1)
+                       p = (char *)&netdev->stats + s->stat_offset;
+               else
+                       p = (char *)priv + s->stat_offset;
+
+               data[i] = (s->sizeof_stat == sizeof(u64)) ?
+                       *(u64 *)p : *(u32 *)p;
+       }
+}
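
The switch MIB registers are 32 bits wide, so the 64-bit counters (the octet counts) are spread over two consecutive registers with the low word first, which is what the (u64)hi << 32 | lo above reconstructs; for example lo = 0x89abcdef and hi = 0x12 combine to 0x1289abcdef.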
+
+static void bcm_enetsw_get_ringparam(struct net_device *dev,
+                                    struct ethtool_ringparam *ering)
+{
+       struct bcm_enet_priv *priv;
+
+       priv = netdev_priv(dev);
+
+       /* rx/tx ring is actually only limited by memory */
+       ering->rx_max_pending = 8192;
+       ering->tx_max_pending = 8192;
+       ering->rx_mini_max_pending = 0;
+       ering->rx_jumbo_max_pending = 0;
+       ering->rx_pending = priv->rx_ring_size;
+       ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enetsw_set_ringparam(struct net_device *dev,
+                                   struct ethtool_ringparam *ering)
+{
+       struct bcm_enet_priv *priv;
+       int was_running;
+
+       priv = netdev_priv(dev);
+
+       was_running = 0;
+       if (netif_running(dev)) {
+               bcm_enetsw_stop(dev);
+               was_running = 1;
+       }
+
+       priv->rx_ring_size = ering->rx_pending;
+       priv->tx_ring_size = ering->tx_pending;
+
+       if (was_running) {
+               int err;
+
+               err = bcm_enetsw_open(dev);
+               if (err)
+                       dev_close(dev);
+       }
+       return 0;
+}
+
+static struct ethtool_ops bcm_enetsw_ethtool_ops = {
+       .get_strings            = bcm_enetsw_get_strings,
+       .get_sset_count         = bcm_enetsw_get_sset_count,
+       .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
+       .get_drvinfo            = bcm_enetsw_get_drvinfo,
+       .get_ringparam          = bcm_enetsw_get_ringparam,
+       .set_ringparam          = bcm_enetsw_set_ringparam,
+};
+
+/* allocate netdevice, request register memory and register device. */
+static int bcm_enetsw_probe(struct platform_device *pdev)
+{
+       struct bcm_enet_priv *priv;
+       struct net_device *dev;
+       struct bcm63xx_enetsw_platform_data *pd;
+       struct resource *res_mem;
+       int ret, irq_rx, irq_tx;
+
+       /* stop if shared driver failed, assume driver->probe will be
+        * called in the same order we register devices (correct ?)
+        */
+       if (!bcm_enet_shared_base[0])
+               return -ENODEV;
+
+       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq_rx = platform_get_irq(pdev, 0);
+       irq_tx = platform_get_irq(pdev, 1);
+       if (!res_mem || irq_rx < 0)
+               return -ENODEV;
+
+       ret = 0;
+       dev = alloc_etherdev(sizeof(*priv));
+       if (!dev)
+               return -ENOMEM;
+       priv = netdev_priv(dev);
+       memset(priv, 0, sizeof(*priv));
+
+       /* initialize default and fetch platform data */
+       priv->enet_is_sw = true;
+       priv->irq_rx = irq_rx;
+       priv->irq_tx = irq_tx;
+       priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+       priv->tx_ring_size = BCMENET_DEF_TX_DESC;
+       priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+
+       pd = pdev->dev.platform_data;
+       if (pd) {
+               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+               memcpy(priv->used_ports, pd->used_ports,
+                      sizeof(pd->used_ports));
+               priv->num_ports = pd->num_ports;
+               priv->dma_has_sram = pd->dma_has_sram;
+               priv->dma_chan_en_mask = pd->dma_chan_en_mask;
+               priv->dma_chan_int_mask = pd->dma_chan_int_mask;
+               priv->dma_chan_width = pd->dma_chan_width;
+       }
+
+       ret = compute_hw_mtu(priv, dev->mtu);
+       if (ret)
+               goto out;
+
+       if (!request_mem_region(res_mem->start, resource_size(res_mem),
+                               "bcm63xx_enetsw")) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       priv->base = ioremap(res_mem->start, resource_size(res_mem));
+       if (priv->base == NULL) {
+               ret = -ENOMEM;
+               goto out_release_mem;
+       }
+
+       priv->mac_clk = clk_get(&pdev->dev, "enetsw");
+       if (IS_ERR(priv->mac_clk)) {
+               ret = PTR_ERR(priv->mac_clk);
+               goto out_unmap;
+       }
+       clk_enable(priv->mac_clk);
+
+       priv->rx_chan = 0;
+       priv->tx_chan = 1;
+       spin_lock_init(&priv->rx_lock);
+
+       /* init rx timeout (used for oom) */
+       init_timer(&priv->rx_timeout);
+       priv->rx_timeout.function = bcm_enet_refill_rx_timer;
+       priv->rx_timeout.data = (unsigned long)dev;
+
+       /* register netdevice */
+       dev->netdev_ops = &bcm_enetsw_ops;
+       netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+       SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       spin_lock_init(&priv->enetsw_mdio_lock);
+
+       ret = register_netdev(dev);
+       if (ret)
+               goto out_put_clk;
+
+       netif_carrier_off(dev);
+       platform_set_drvdata(pdev, dev);
+       priv->pdev = pdev;
+       priv->net_dev = dev;
+
+       return 0;
+
+out_put_clk:
+       clk_put(priv->mac_clk);
+
+out_unmap:
+       iounmap(priv->base);
+
+out_release_mem:
+       release_mem_region(res_mem->start, resource_size(res_mem));
+out:
+       free_netdev(dev);
+       return ret;
+}
+
+/* exit func, stops hardware and unregisters netdevice */
+static int bcm_enetsw_remove(struct platform_device *pdev)
+{
+       struct bcm_enet_priv *priv;
+       struct net_device *dev;
+       struct resource *res;
+
+       /* stop netdevice */
+       dev = platform_get_drvdata(pdev);
+       priv = netdev_priv(dev);
+       unregister_netdev(dev);
+
+       /* release device resources */
+       iounmap(priv->base);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(dev);
+       return 0;
+}
+
+struct platform_driver bcm63xx_enetsw_driver = {
+       .probe  = bcm_enetsw_probe,
+       .remove = bcm_enetsw_remove,
+       .driver = {
+               .name   = "bcm63xx_enetsw",
+               .owner  = THIS_MODULE,
+       },
+};
+
+/* reserve & remap memory space shared between all macs */
+static int bcm_enet_shared_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       void __iomem *p[3];
+       unsigned int i;
+
+       memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
+
+       for (i = 0; i < 3; i++) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+               p[i] = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(p[i]))
+                       return PTR_ERR(p[i]);
+       }
+
+       memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
+
+       return 0;
+}
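
The shared device now carries three memory resources instead of one. Judging from the accessors used throughout this patch, index 0 backs the global enet_dma_* registers, index 1 the per-channel enet_dmac_* registers and index 2 the state-RAM enet_dmas_* registers, which is why both probe routines test bcm_enet_shared_base[0] to detect a failed shared probe.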
+
+static int bcm_enet_shared_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+/* this "shared" driver is needed because both macs share a single
+ * address space
+ */
+struct platform_driver bcm63xx_enet_shared_driver = {
+       .probe  = bcm_enet_shared_probe,
+       .remove = bcm_enet_shared_remove,
+       .driver = {
+               .name   = "bcm63xx_enet_shared",
+               .owner  = THIS_MODULE,
+       },
+};
+
+/* entry point */
+static int __init bcm_enet_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&bcm63xx_enet_shared_driver);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&bcm63xx_enet_driver);
+       if (ret)
+               platform_driver_unregister(&bcm63xx_enet_shared_driver);
+
+       ret = platform_driver_register(&bcm63xx_enetsw_driver);
+       if (ret) {
+               platform_driver_unregister(&bcm63xx_enet_driver);
+               platform_driver_unregister(&bcm63xx_enet_shared_driver);
+       }
 
        return ret;
 }
@@ -1918,6 +2913,7 @@ static int __init bcm_enet_init(void)
 static void __exit bcm_enet_exit(void)
 {
        platform_driver_unregister(&bcm63xx_enet_driver);
+       platform_driver_unregister(&bcm63xx_enetsw_driver);
        platform_driver_unregister(&bcm63xx_enet_shared_driver);
 }
 
index 133d5857b9e280c009356c816a1d341eae4ccec3..f55af4310085a1d433b567a21ecb3dfe6979c50f 100644 (file)
@@ -18,6 +18,7 @@
 
 /* maximum burst len for dma (4 bytes unit) */
 #define BCMENET_DMA_MAXBURST   16
+#define BCMENETSW_DMA_MAXBURST 8
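
With the 4-byte unit noted above, the switch variant therefore uses 8 * 4 = 32-byte DMA bursts while the plain MAC keeps its 16 * 4 = 64-byte bursts; compute_hw_mtu() aligns the rx skb size to dma_maxburst * 4 bytes to match.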
 
 /* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value
  * must be low enough so that a DMA transfer of above burst length can
 #define ETH_MIB_RX_CNTRL                       54
 
 
+/*
+ * SW MIB Counters register definitions
+ */
+#define ETHSW_MIB_TX_ALL_OCT                   0
+#define ETHSW_MIB_TX_DROP_PKTS                 2
+#define ETHSW_MIB_TX_QOS_PKTS                  3
+#define ETHSW_MIB_TX_BRDCAST                   4
+#define ETHSW_MIB_TX_MULT                      5
+#define ETHSW_MIB_TX_UNI                       6
+#define ETHSW_MIB_TX_COL                       7
+#define ETHSW_MIB_TX_1_COL                     8
+#define ETHSW_MIB_TX_M_COL                     9
+#define ETHSW_MIB_TX_DEF                       10
+#define ETHSW_MIB_TX_LATE                      11
+#define ETHSW_MIB_TX_EX_COL                    12
+#define ETHSW_MIB_TX_PAUSE                     14
+#define ETHSW_MIB_TX_QOS_OCT                   15
+
+#define ETHSW_MIB_RX_ALL_OCT                   17
+#define ETHSW_MIB_RX_UND                       19
+#define ETHSW_MIB_RX_PAUSE                     20
+#define ETHSW_MIB_RX_64                                21
+#define ETHSW_MIB_RX_65_127                    22
+#define ETHSW_MIB_RX_128_255                   23
+#define ETHSW_MIB_RX_256_511                   24
+#define ETHSW_MIB_RX_512_1023                  25
+#define ETHSW_MIB_RX_1024_1522                 26
+#define ETHSW_MIB_RX_OVR                       27
+#define ETHSW_MIB_RX_JAB                       28
+#define ETHSW_MIB_RX_ALIGN                     29
+#define ETHSW_MIB_RX_CRC                       30
+#define ETHSW_MIB_RX_GD_OCT                    31
+#define ETHSW_MIB_RX_DROP                      33
+#define ETHSW_MIB_RX_UNI                       34
+#define ETHSW_MIB_RX_MULT                      35
+#define ETHSW_MIB_RX_BRDCAST                   36
+#define ETHSW_MIB_RX_SA_CHANGE                 37
+#define ETHSW_MIB_RX_FRAG                      38
+#define ETHSW_MIB_RX_OVR_DISC                  39
+#define ETHSW_MIB_RX_SYM                       40
+#define ETHSW_MIB_RX_QOS_PKTS                  41
+#define ETHSW_MIB_RX_QOS_OCT                   42
+#define ETHSW_MIB_RX_1523_2047                 44
+#define ETHSW_MIB_RX_2048_4095                 45
+#define ETHSW_MIB_RX_4096_8191                 46
+#define ETHSW_MIB_RX_8192_9728                 47
+
+
 struct bcm_enet_mib_counters {
        u64 tx_gd_octets;
        u32 tx_gd_pkts;
        u32 tx_all_octets;
        u32 tx_all_pkts;
+       u32 tx_unicast;
        u32 tx_brdcast;
        u32 tx_mult;
        u32 tx_64;
@@ -97,7 +147,12 @@ struct bcm_enet_mib_counters {
        u32 tx_256_511;
        u32 tx_512_1023;
        u32 tx_1024_max;
+       u32 tx_1523_2047;
+       u32 tx_2048_4095;
+       u32 tx_4096_8191;
+       u32 tx_8192_9728;
        u32 tx_jab;
+       u32 tx_drop;
        u32 tx_ovr;
        u32 tx_frag;
        u32 tx_underrun;
@@ -114,6 +169,7 @@ struct bcm_enet_mib_counters {
        u32 rx_all_octets;
        u32 rx_all_pkts;
        u32 rx_brdcast;
+       u32 rx_unicast;
        u32 rx_mult;
        u32 rx_64;
        u32 rx_65_127;
@@ -197,6 +253,9 @@ struct bcm_enet_priv {
        /* number of dma desc in tx ring */
        int tx_ring_size;
 
+       /* maximum dma burst size */
+       int dma_maxburst;
+
        /* cpu view of rx dma ring */
        struct bcm_enet_desc *tx_desc_cpu;
 
@@ -269,6 +328,33 @@ struct bcm_enet_priv {
 
        /* maximum hardware transmit/receive size */
        unsigned int hw_mtu;
+
+       bool enet_is_sw;
+
+       /* port mapping for switch devices */
+       int num_ports;
+       struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
+       int sw_port_link[ENETSW_MAX_PORT];
+
+       /* used to poll switch port state */
+       struct timer_list swphy_poll;
+       spinlock_t enetsw_mdio_lock;
+
+       /* dma channel enable mask */
+       u32 dma_chan_en_mask;
+
+       /* dma channel interrupt mask */
+       u32 dma_chan_int_mask;
+
+       /* DMA engine has internal SRAM */
+       bool dma_has_sram;
+
+       /* dma channel width */
+       unsigned int dma_chan_width;
+
+       /* dma descriptor shift value */
+       unsigned int dma_desc_shift;
 };
 
+
 #endif /* ! BCM63XX_ENET_H_ */
index 5d204492c603a84fd94f03faf5fc1179abf61208..6a2de1d79ff6c011188f2ccfb8e48c315d93825c 100644 (file)
@@ -8104,7 +8104,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        pci_set_master(pdev);
 
-       bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+       bp->pm_cap = pdev->pm_cap;
        if (bp->pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find power management capability, aborting\n");
@@ -8764,18 +8764,4 @@ static struct pci_driver bnx2_pci_driver = {
        .err_handler    = &bnx2_err_handler,
 };
 
-static int __init bnx2_init(void)
-{
-       return pci_register_driver(&bnx2_pci_driver);
-}
-
-static void __exit bnx2_cleanup(void)
-{
-       pci_unregister_driver(&bnx2_pci_driver);
-}
-
-module_init(bnx2_init);
-module_exit(bnx2_cleanup);
-
-
-
+module_pci_driver(bnx2_pci_driver);
index 3dba2a70a00e41f6ab86de036b4a3a7d859fd105..dedbd76c033ede4d695e3c7498f99ff263ca860b 100644 (file)
 #define BCM_DCBNL
 #endif
 
-
 #include "bnx2x_hsi.h"
 
 #include "../cnic_if.h"
 
-
 #define BNX2X_MIN_MSIX_VEC_CNT(bp)             ((bp)->min_msix_vec_cnt)
 
 #include <linux/mdio.h>
@@ -114,7 +112,6 @@ do {                                                                \
 #define BNX2X_ERROR(fmt, ...)                                  \
        pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
 
-
 /* before we have a dev->name use dev_info() */
 #define BNX2X_DEV_INFO(fmt, ...)                                \
 do {                                                            \
@@ -147,7 +144,6 @@ do {                                                \
 #define U64_HI(x)                      ((u32)(((u64)(x)) >> 32))
 #define HILO_U64(hi, lo)               ((((u64)(hi)) << 32) + (lo))
 
-
 #define REG_ADDR(bp, offset)           ((bp->regview) + (offset))
 
 #define REG_RD(bp, offset)             readl(REG_ADDR(bp, offset))
@@ -366,7 +362,7 @@ union db_prod {
 /*
  * Number of required  SGEs is the sum of two:
  * 1. Number of possible opened aggregations (next packet for
- *    these aggregations will probably consume SGE immidiatelly)
+ *    these aggregations will probably consume SGE immediately)
  * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
  *    after placement on BD for new TPA aggregation)
  *
@@ -387,7 +383,6 @@ union db_prod {
 #define BIT_VEC64_ELEM_SHIFT           6
 #define BIT_VEC64_ELEM_MASK            ((u64)BIT_VEC64_ELEM_SZ - 1)
 
-
 #define __BIT_VEC64_SET_BIT(el, bit) \
        do { \
                el = ((el) | ((u64)0x1 << (bit))); \
@@ -398,7 +393,6 @@ union db_prod {
                el = ((el) & (~((u64)0x1 << (bit)))); \
        } while (0)
 
-
 #define BIT_VEC64_SET_BIT(vec64, idx) \
        __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
                           (idx) & BIT_VEC64_ELEM_MASK)
@@ -419,8 +413,6 @@ union db_prod {
 
 /*******************************************************/
 
-
-
 /* Number of u64 elements in SGE mask array */
 #define RX_SGE_MASK_LEN                        (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK           (RX_SGE_MASK_LEN - 1)
@@ -493,11 +485,26 @@ struct bnx2x_fastpath {
        struct bnx2x            *bp; /* parent */
 
        struct napi_struct      napi;
+
+#ifdef CONFIG_NET_LL_RX_POLL
+       unsigned int state;
+#define BNX2X_FP_STATE_IDLE                  0
+#define BNX2X_FP_STATE_NAPI            (1 << 0)    /* NAPI owns this FP */
+#define BNX2X_FP_STATE_POLL            (1 << 1)    /* poll owns this FP */
+#define BNX2X_FP_STATE_NAPI_YIELD      (1 << 2)    /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD      (1 << 3)    /* poll yielded this FP */
+#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+#define BNX2X_FP_LOCKED        (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+       /* protect state */
+       spinlock_t lock;
+#endif /* CONFIG_NET_LL_RX_POLL */
+
        union host_hc_status_block      status_blk;
-       /* chip independed shortcuts into sb structure */
+       /* chip independent shortcuts into sb structure */
        __le16                  *sb_index_values;
        __le16                  *sb_running_index;
-       /* chip independed shortcut into rx_prods_offset memory */
+       /* chip independent shortcut into rx_prods_offset memory */
        u32                     ustorm_rx_prods_offset;
 
        u32                     rx_buf_size;
@@ -565,6 +572,116 @@ struct bnx2x_fastpath {
 #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
 #define bnx2x_fp_qstats(bp, fp)        (&((bp)->fp_stats[(fp)->index].eth_q_stats))
 
+#ifdef CONFIG_NET_LL_RX_POLL
+static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+{
+       spin_lock_init(&fp->lock);
+       fp->state = BNX2X_FP_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a FP */
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+       bool rc = true;
+
+       spin_lock(&fp->lock);
+       if (fp->state & BNX2X_FP_LOCKED) {
+               WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+               fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+               rc = false;
+       } else {
+               /* we don't care if someone yielded */
+               fp->state = BNX2X_FP_STATE_NAPI;
+       }
+       spin_unlock(&fp->lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the FP while napi had it */
+static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+       bool rc = false;
+
+       spin_lock(&fp->lock);
+       WARN_ON(fp->state &
+               (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+
+       if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+               rc = true;
+       fp->state = BNX2X_FP_STATE_IDLE;
+       spin_unlock(&fp->lock);
+       return rc;
+}
+
+/* called from bnx2x_low_latency_poll() */
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+       bool rc = true;
+
+       spin_lock_bh(&fp->lock);
+       if ((fp->state & BNX2X_FP_LOCKED)) {
+               fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+               rc = false;
+       } else {
+               /* preserve yield marks */
+               fp->state |= BNX2X_FP_STATE_POLL;
+       }
+       spin_unlock_bh(&fp->lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the FP while it was locked */
+static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+       bool rc = false;
+
+       spin_lock_bh(&fp->lock);
+       WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+
+       if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+               rc = true;
+       fp->state = BNX2X_FP_STATE_IDLE;
+       spin_unlock_bh(&fp->lock);
+       return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+       WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+       return fp->state & BNX2X_FP_USER_PEND;
+}
+#else
+static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+{
+}
+
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+       return true;
+}
+
+static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+       return false;
+}
+
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+       return false;
+}
+
+static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+       return false;
+}
+
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+       return false;
+}
+#endif /* CONFIG_NET_LL_RX_POLL */
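
These helpers implement a small ownership protocol between the NAPI handler and the low-latency socket-poll path: whoever locks the fastpath first gets to process it, and the loser leaves a *_YIELD mark so the owner knows a poll attempt was missed. A rough usage sketch for the NAPI side, illustrative only and with a made-up function name:

    static bool bnx2x_napi_owns_fp_sketch(struct bnx2x_fastpath *fp)
    {
            if (!bnx2x_fp_lock_napi(fp))
                    return false;   /* a busy-polling socket owns the FP */

            /* ... process rx/tx completions while holding ownership ... */

            if (bnx2x_fp_unlock_napi(fp)) {
                    /* a socket poller yielded while NAPI held the FP, so
                     * more work may be pending; don't declare the queue idle */
            }
            return true;
    }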
+
 /* Use 2500 as a mini-jumbo MTU for FCoE */
 #define BNX2X_FCOE_MINI_JUMBO_MTU      2500
 
@@ -580,12 +697,10 @@ struct bnx2x_fastpath {
                                                txdata_ptr[FIRST_TX_COS_INDEX] \
                                                ->var)
 
-
 #define IS_ETH_FP(fp)          ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
 #define IS_FCOE_FP(fp)         ((fp)->index == FCOE_IDX((fp)->bp))
 #define IS_FCOE_IDX(idx)       ((idx) == FCOE_IDX(bp))
 
-
 /* MC hsi */
 #define MAX_FETCH_BD           13      /* HW max BDs per packet */
 #define RX_COPY_THRESH         92
@@ -613,7 +728,7 @@ struct bnx2x_fastpath {
  * START_BD(splitted)  - includes unpaged data segment for GSO
  * PARSING_BD          - for TSO and CSUM data
  * PARSING_BD2         - for encapsulation data
- * Frag BDs            - decribes pages for frags
+ * Frag BDs            - describes pages for frags
  */
 #define BDS_PER_TX_PKT         4
 #define MAX_BDS_PER_TX_PKT     (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
@@ -693,12 +808,10 @@ struct bnx2x_fastpath {
                                 FW_DROP_LEVEL(bp))
 #define RCQ_TH_HI(bp)          (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
 
-
 /* This is needed for determining of last_max */
 #define SUB_S16(a, b)          (s16)((s16)(a) - (s16)(b))
 #define SUB_S32(a, b)          (s32)((s32)(a) - (s32)(b))
 
-
 #define BNX2X_SWCID_SHIFT      17
 #define BNX2X_SWCID_MASK       ((0x1 << BNX2X_SWCID_SHIFT) - 1)
 
@@ -723,7 +836,6 @@ struct bnx2x_fastpath {
                       DPM_TRIGER_TYPE); \
        } while (0)
 
-
 /* TX CSUM helpers */
 #define SKB_CS_OFF(skb)                (offsetof(struct tcphdr, check) - \
                                 skb->csum_offset)
@@ -766,7 +878,6 @@ struct bnx2x_fastpath {
 #define BNX2X_RX_SUM_FIX(cqe) \
        BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
 
-
 #define FP_USB_FUNC_OFF        \
                        offsetof(struct cstorm_status_block_u, func)
 #define FP_CSB_FUNC_OFF        \
@@ -900,14 +1011,14 @@ struct bnx2x_common {
 #define CHIP_IS_E3A0(bp)               (CHIP_IS_E3(bp) && \
                                         (CHIP_REV(bp) == CHIP_REV_Ax))
 /* This define is used in two main places:
- * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
+ * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
  * to nic-only mode or to offload mode. Offload mode is configured if either the
  * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
  * registered for this port (which means that the user wants storage services).
  * 2. During cnic-related load, to know if offload mode is already configured in
- * the HW or needs to be configrued.
+ * the HW or needs to be configured.
  * Since the transition from nic-mode to offload-mode in HW causes traffic
- * coruption, nic-mode is configured only in ports on which storage services
+ * corruption, nic-mode is configured only in ports on which storage services
  * where never requested.
  */
 #define CONFIGURE_NIC_MODE(bp)         (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
@@ -1008,14 +1119,14 @@ extern struct workqueue_struct *bnx2x_wq;
  * If the maximum number of FP-SB available is X then:
  * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
  *    regular L2 queues is Y=X-1
- * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
  * c. If the FCoE L2 queue is supported the actual number of L2 queues
  *    is Y+1
  * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
  *    slow-path interrupts) or Y+2 if CNIC is supported (one additional
  *    FP interrupt context for the CNIC).
  * e. The number of HW context (CID count) is always X or X+1 if FCoE
- *    L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ *    L2 queue is supported. The cid for the FCoE L2 queue is always X.
  */
 
 /* fast-path interrupt contexts E1x */
@@ -1068,7 +1179,6 @@ struct bnx2x_slowpath {
                struct eth_classify_rules_ramrod_data   e2;
        } mac_rdata;
 
-
        union {
                struct tstorm_eth_mac_filter_config     e1x;
                struct eth_filter_rules_ramrod_data     e2;
@@ -1119,7 +1229,6 @@ struct bnx2x_slowpath {
 #define bnx2x_sp_mapping(bp, var) \
                (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
 
-
 /* attn group wiring */
 #define MAX_DYNAMIC_ATTN_GRPS          8
 
@@ -1221,11 +1330,11 @@ enum {
        BNX2X_SP_RTNL_AFEX_F_UPDATE,
        BNX2X_SP_RTNL_ENABLE_SRIOV,
        BNX2X_SP_RTNL_VFPF_MCAST,
+       BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
        BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 };
 
-
 struct bnx2x_prev_path_list {
        struct list_head list;
        u8 bus;
@@ -1392,6 +1501,7 @@ struct bnx2x {
 #define USING_SINGLE_MSIX_FLAG         (1 << 20)
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF   (1 << 21)
 #define IS_VF_FLAG                     (1 << 22)
+#define INTERRUPTS_ENABLED_FLAG                (1 << 23)
 
 #define BP_NOMCP(bp)                   ((bp)->flags & NO_MCP_FLAG)
 
@@ -1585,7 +1695,7 @@ struct bnx2x {
        struct mutex            cnic_mutex;
        struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
 
-       /* Start index of the "special" (CNIC related) L2 cleints */
+       /* Start index of the "special" (CNIC related) L2 clients */
        u8                              cnic_base_cl_id;
 
        int                     dmae_ready;
@@ -1699,7 +1809,7 @@ struct bnx2x {
        /* operation indication for the sp_rtnl task */
        unsigned long                           sp_rtnl_state;
 
-       /* DCBX Negotation results */
+       /* DCBX Negotiation results */
        struct dcbx_features                    dcbx_local_feat;
        u32                                     dcbx_error;
 
@@ -1755,7 +1865,6 @@ extern int num_queues;
 #define FUNC_FLG_SPQ           0x0010
 #define FUNC_FLG_LEADING       0x0020  /* PF only */
 
-
 struct bnx2x_func_init_params {
        /* dma */
        dma_addr_t      fw_stat_map;    /* valid iff FUNC_FLG_STATS */
@@ -1853,9 +1962,6 @@ struct bnx2x_func_init_params {
 
 #define skip_queue(bp, idx)    (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
-
-
-
 /**
  * bnx2x_set_mac_one - configure a single MAC address
  *
@@ -1921,7 +2027,6 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u8 src_type, u8 dst_type);
 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
 
 /* FLR related routines */
 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -1937,6 +2042,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
 
+bool bnx2x_port_after_undi(struct bnx2x *bp);
+
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
                           int wait)
 {
@@ -1998,7 +2105,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define UNLOAD_CLOSE                   1
 #define UNLOAD_RECOVERY                        2
 
-
 /* DMAE command defines */
 #define DMAE_TIMEOUT                   -1
 #define DMAE_PCI_ERROR                 -2      /* E2 and onward */
@@ -2062,7 +2168,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define DMAE_LEN32_WR_MAX(bp)          (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
 
 #define DMAE_COMP_VAL                  0x60d0d0ae /* E2 and on - upper bit
-                                                       indicates eror */
+                                                   * indicates error
+                                                   */
 
 #define MAX_DMAE_C_PER_PORT            8
 #define INIT_DMAE_C(bp)                        (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -2100,7 +2207,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define SP_DESC_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_spe))
 #define MAX_SP_DESC_CNT                        (SP_DESC_CNT - 1)
 
-
 #define BNX2X_BTR                      4
 #define MAX_SPQ_PENDING                        8
 
@@ -2137,6 +2243,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define ATTN_HARD_WIRED_MASK           0xff00
 #define ATTENTION_ID                   4
 
+#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
+                                IS_MF_FCOE_AFEX(bp))
 
 /* stuff added to make the code fit 80Col */
 
@@ -2338,4 +2446,9 @@ enum {
 
 #define NUM_MACS       8
 
+enum bnx2x_pci_bus_speed {
+       BNX2X_PCI_LINK_SPEED_2500 = 2500,
+       BNX2X_PCI_LINK_SPEED_5000 = 5000,
+       BNX2X_PCI_LINK_SPEED_8000 = 8000
+};
 #endif /* bnx2x.h */
index b8fbe266ab68f1619a18b7e49e9d6df19a5f0625..ec3aa1d451e8d4954ec09d1b303b30d46bf08b2d 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/tcp.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
+#include <net/ll_poll.h>
 #include <linux/prefetch.h>
 #include "bnx2x_cmn.h"
 #include "bnx2x_init.h"
@@ -124,7 +125,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 
        /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
-        * backward along the array could cause memory to be overriden
+        * backward along the array could cause memory to be overridden
         */
        for (cos = 1; cos < bp->max_cos; cos++) {
                for (i = 0; i < old_eth_num - delta; i++) {
@@ -165,7 +166,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
-
        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
@@ -259,7 +259,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
        smp_mb();
 
        if (unlikely(netif_tx_queue_stopped(txq))) {
-               /* Taking tx_lock() is needed to prevent reenabling the queue
+               /* Taking tx_lock() is needed to prevent re-enabling the queue
                 * while it's empty. This could have happen if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        return err;
                }
 
-               /* Unmap the page as we r going to pass it to the stack */
+               /* Unmap the page as we're going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGES, DMA_FROM_DEVICE);
@@ -733,7 +733,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        dev_kfree_skb_any(skb);
                }
 
-
                /* put new data in bin */
                rx_buf->data = new_data;
 
@@ -805,40 +804,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
-       u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+       u16 sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;
+       union eth_rx_cqe *cqe;
+       struct eth_fast_path_rx_cqe *cqe_fp;
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
 #endif
 
-       /* CQ "next element" is of the size of the regular element,
-          that's why it's ok here */
-       hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
-       if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-               hw_comp_cons++;
-
        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;
 
-       /* Memory barrier necessary as speculative reads of the rx
-        * buffer can be ahead of the index in the status block
-        */
-       rmb();
+       comp_ring_cons = RCQ_BD(sw_comp_cons);
+       cqe = &fp->rx_comp_ring[comp_ring_cons];
+       cqe_fp = &cqe->fast_path_cqe;
 
        DP(NETIF_MSG_RX_STATUS,
-          "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
-          fp->index, hw_comp_cons, sw_comp_cons);
+          "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 
-       while (sw_comp_cons != hw_comp_cons) {
+       while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
-               union eth_rx_cqe *cqe;
-               struct eth_fast_path_rx_cqe *cqe_fp;
                u8 cqe_fp_flags;
                enum eth_rx_cqe_type cqe_fp_type;
                u16 len, pad, queue;
@@ -850,12 +841,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        return 0;
 #endif
 
-               comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);
 
-               cqe = &fp->rx_comp_ring[comp_ring_cons];
-               cqe_fp = &cqe->fast_path_cqe;
                cqe_fp_flags = cqe_fp->type_error_flags;
                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 
@@ -899,7 +887,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                                                cqe_fp);
 
                                goto next_rx;
-
                        }
                        queue = cqe->end_agg_cqe.queue_index;
                        tpa_info = &fp->tpa_info[queue];
@@ -1002,9 +989,13 @@ reuse_rx:
                    PARSING_FLAGS_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(cqe_fp->vlan_tag));
-               napi_gro_receive(&fp->napi, skb);
 
+               skb_mark_ll(skb, &fp->napi);
 
+               if (bnx2x_fp_ll_polling(fp))
+                       netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&fp->napi, skb);
 next_rx:
                rx_buf->data = NULL;
 
@@ -1016,8 +1007,15 @@ next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
 
+               /* mark CQE as free */
+               BNX2X_SEED_CQE(cqe_fp);
+
                if (rx_pkt == budget)
                        break;
+
+               comp_ring_cons = RCQ_BD(sw_comp_cons);
+               cqe = &fp->rx_comp_ring[comp_ring_cons];
+               cqe_fp = &cqe->fast_path_cqe;
        } /* while */
 
        fp->rx_bd_cons = bd_cons;
@@ -1053,8 +1051,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 #endif
 
        /* Handle Rx and Tx according to MSI-X vector */
-       prefetch(fp->rx_cons_sb);
-
        for_each_cos_in_tx_queue(fp, cos)
                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
 
@@ -1118,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp,
 
        memset(data, 0, sizeof(*data));
 
-       /* Fill the report data: efective line speed */
+       /* Fill the report data: effective line speed */
        data->line_speed = line_speed;
 
        /* Link is down */
@@ -1161,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp)
  *
  * @bp:                driver handle
  *
- * None atomic inmlementation.
+ * None atomic implementation.
  * Should be called under the phy_lock.
  */
 void __bnx2x_link_report(struct bnx2x *bp)
@@ -1304,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
                if (!fp->disable_tpa) {
-                       /* Fill the per-aggregtion pool */
+                       /* Fill the per-aggregation pool */
                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
                                struct bnx2x_agg_info *tpa_info =
                                        &fp->tpa_info[i];
@@ -1726,7 +1722,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
        return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
 }
 
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
        int rc = 0;
        if (bp->flags & USING_MSIX_FLAG &&
@@ -1759,32 +1755,46 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
 {
        int i;
 
-       for_each_rx_queue_cnic(bp, i)
+       for_each_rx_queue_cnic(bp, i) {
+               bnx2x_fp_init_lock(&bp->fp[i]);
                napi_enable(&bnx2x_fp(bp, i, napi));
+       }
 }
 
 static void bnx2x_napi_enable(struct bnx2x *bp)
 {
        int i;
 
-       for_each_eth_queue(bp, i)
+       for_each_eth_queue(bp, i) {
+               bnx2x_fp_init_lock(&bp->fp[i]);
                napi_enable(&bnx2x_fp(bp, i, napi));
+       }
 }
 
 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
        int i;
 
-       for_each_rx_queue_cnic(bp, i)
+       local_bh_disable();
+       for_each_rx_queue_cnic(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
+               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
+                       mdelay(1);
+       }
+       local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
        int i;
 
-       for_each_eth_queue(bp, i)
+       local_bh_disable();
+       for_each_eth_queue(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
+               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
+                       mdelay(1);
+       }
+       local_bh_enable();
 }
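The bnx2x_fp_init_lock() and bnx2x_fp_lock_napi() helpers used above (and the bnx2x_fp_lock_poll()/bnx2x_fp_unlock_*() counterparts used further down) live in bnx2x.h; when CONFIG_NET_LL_RX_POLL is disabled they presumably collapse to no-ops. A minimal sketch of the idea they implement, with illustrative names rather than the driver's actual fields:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    enum demo_fp_owner { DEMO_FP_IDLE, DEMO_FP_NAPI, DEMO_FP_POLL };

    struct demo_fp_lock {
            spinlock_t              lock;
            enum demo_fp_owner      owner;
    };

    static void demo_fp_init_lock(struct demo_fp_lock *l)
    {
            spin_lock_init(&l->lock);
            l->owner = DEMO_FP_IDLE;        /* nobody owns the queue yet */
    }

    /* claim the queue for NAPI or for a busy-polling socket; fails if the
     * other context already owns it
     */
    static bool demo_fp_try_lock(struct demo_fp_lock *l, enum demo_fp_owner who)
    {
            bool taken = false;

            spin_lock_bh(&l->lock);
            if (l->owner == DEMO_FP_IDLE) {
                    l->owner = who;
                    taken = true;
            }
            spin_unlock_bh(&l->lock);
            return taken;
    }

    static void demo_fp_unlock(struct demo_fp_lock *l)
    {
            spin_lock_bh(&l->lock);
            l->owner = DEMO_FP_IDLE;        /* hand the queue back */
            spin_unlock_bh(&l->lock);
    }

With such an ownership lock, the disable loops above simply spin (with mdelay(1)) until they win NAPI ownership, so no busy-polling socket can still be inside the RX fast path when napi_disable() returns.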
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1829,7 +1839,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
        }
 
        /* select a non-FCoE queue */
-       return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
+       return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -1862,7 +1872,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
  *
  * If the actual number of Tx queues (for each CoS) is less than 16 then there
  * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
- * 16..31,...) with indicies that are not coupled with any real Tx queue.
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
  *
  * The proper configuration of skb->queue_mapping is handled by
  * bnx2x_select_queue() and __skb_tx_hash().
@@ -1924,7 +1934,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
                                  ETH_OVREHEAD +
                                  mtu +
                                  BNX2X_FW_RX_ALIGN_END;
-               /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
+               /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
                else
@@ -1937,7 +1947,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
        int i;
        u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
-       /* Prepare the initial contents fo the indirection table if RSS is
+       /* Prepare the initial contents for the indirection table if RSS is
         * enabled
         */
        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
@@ -2015,7 +2025,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 
 /*
  * Cleans the object that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
  */
 void bnx2x_squeeze_objects(struct bnx2x *bp)
 {
@@ -2166,10 +2176,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
                bp->fw_stats_req_sz;
 
-       DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
+       DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
           U64_HI(bp->fw_stats_req_mapping),
           U64_LO(bp->fw_stats_req_mapping));
-       DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
+       DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
           U64_HI(bp->fw_stats_data_mapping),
           U64_LO(bp->fw_stats_data_mapping));
        return 0;
@@ -2183,6 +2193,8 @@ alloc_mem_err:
 /* send load request to mcp and analyze response */
 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
 {
+       u32 param;
+
        /* init fw_seq */
        bp->fw_seq =
                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -2195,9 +2207,13 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
                 DRV_PULSE_SEQ_MASK);
        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
 
+       param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
+
+       if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
+               param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
+
        /* load request */
-       (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
-                                       DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+       (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
 
        /* if mcp fails to respond we must abort */
        if (!(*load_code)) {
@@ -2238,7 +2254,7 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
 
                /* abort nic load if version mismatch */
                if (my_fw != loaded_fw) {
-                       BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
+                       BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
                                  loaded_fw, my_fw);
                        return -EBUSY;
                }
@@ -2316,10 +2332,10 @@ static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 {
        struct bnx2x_fastpath *fp = &bp->fp[index];
-
        int cos;
        struct napi_struct orig_napi = fp->napi;
        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
+
        /* bzero bnx2x_fastpath contents */
        if (fp->tpa_info)
                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
@@ -2345,8 +2361,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
                                BNX2X_NUM_ETH_QUEUES(bp) + index];
 
-       /*
-        * set the tpa flag for each queue. The tpa flag determines the queue
+       /* set the tpa flag for each queue. The tpa flag determines the queue
         * minimal size so it must be set prior to queue memory allocation
         */
        fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
@@ -2429,7 +2444,6 @@ int bnx2x_load_cnic(struct bnx2x *bp)
        if (bp->state == BNX2X_STATE_OPEN)
                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
 
-
        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
 
        return 0;
@@ -2472,6 +2486,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
+       /* zero the structure w/o any lock, before SP handler is initialized */
        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                &bp->last_reported_link.link_report_flags);
@@ -2536,8 +2551,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        }
 
        /* configure multi cos mappings in kernel.
-        * this configuration may be overriden by a multi class queue discipline
-        * or by a dcbx negotiation result.
+        * this configuration may be overridden by a multi class queue
+        * discipline or by a dcbx negotiation result.
         */
        bnx2x_setup_tc(bp->dev, bp->max_cos);
 
@@ -2696,7 +2711,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* Start the Tx */
        switch (load_mode) {
        case LOAD_NORMAL:
-               /* Tx queue should be only reenabled */
+               /* Tx queue should be only re-enabled */
                netif_tx_wake_all_queues(bp->dev);
                break;
 
@@ -2841,7 +2856,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        }
 
        /* Nothing to do during unload if previous bnx2x_nic_load()
-        * have not completed succesfully - all resourses are released.
+        * have not completed successfully - all resources are released.
         *
         * we can get here only after unsuccessful ndo_* callback, during which
         * dev->IFF_UP flag is still on.
@@ -2856,6 +2871,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
        smp_mb();
 
+       /* indicate to VFs that the PF is going down */
+       bnx2x_iov_channel_down(bp);
+
        if (CNIC_LOADED(bp))
                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 
@@ -2890,10 +2908,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
                /* Send the UNLOAD_REQUEST to the MCP */
                bnx2x_send_unload_req(bp, unload_mode);
 
-               /*
-                * Prevent transactions to host from the functions on the
+               /* Prevent transactions to host from the functions on the
                 * engine that doesn't reset global blocks in case of global
-                * attention once gloabl blocks are reset and gates are opened
+                * attention once global blocks are reset and gates are opened
                 * (the engine which leader will perform the recovery
                 * last).
                 */
@@ -2914,7 +2931,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        }
 
        /*
-        * At this stage no more interrupts will arrive so we may safly clean
+        * At this stage no more interrupts will arrive so we may safely clean
         * the queueable objects here in case they failed to get cleaned so far.
         */
        if (IS_PF(bp))
@@ -2955,7 +2972,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
                        bnx2x_set_reset_global(bp);
        }
 
-
        /* The last driver must disable a "close the gate" if there is no
         * parity attention or "process kill" pending.
         */
@@ -3040,6 +3056,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
                        return 0;
                }
 #endif
+               if (!bnx2x_fp_lock_napi(fp))
+                       return work_done;
 
                for_each_cos_in_tx_queue(fp, cos)
                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
@@ -3049,12 +3067,15 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
                        work_done += bnx2x_rx_int(fp, budget - work_done);
 
                        /* must not complete if we consumed full budget */
-                       if (work_done >= budget)
+                       if (work_done >= budget) {
+                               bnx2x_fp_unlock_napi(fp);
                                break;
+                       }
                }
 
                /* Fall out from the NAPI loop if needed */
-               if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+               if (!bnx2x_fp_unlock_napi(fp) &&
+                   !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 
                        /* No need to update SB for FCoE L2 ring as long as
                         * it's connected to the default SB and the SB
@@ -3096,6 +3117,32 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+#ifdef CONFIG_NET_LL_RX_POLL
+/* must be called with local_bh_disable()d */
+int bnx2x_low_latency_recv(struct napi_struct *napi)
+{
+       struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+                                                napi);
+       struct bnx2x *bp = fp->bp;
+       int found = 0;
+
+       if ((bp->state == BNX2X_STATE_CLOSED) ||
+           (bp->state == BNX2X_STATE_ERROR) ||
+           (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+               return LL_FLUSH_FAILED;
+
+       if (!bnx2x_fp_lock_poll(fp))
+               return LL_FLUSH_BUSY;
+
+       if (bnx2x_has_rx_work(fp))
+               found = bnx2x_rx_int(fp, 4);
+
+       bnx2x_fp_unlock_poll(fp);
+
+       return found;
+}
+#endif
+
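bnx2x_low_latency_recv() is the driver half of the low-latency socket polling (busy poll) support added in this series: it returns LL_FLUSH_FAILED when the device state or the TPA/GRO configuration makes direct polling unusable (presumably because aggregated frames cannot be handed straight to a polling socket), LL_FLUSH_BUSY when NAPI currently owns the queue, and otherwise the number of packets it pulled in. A rough, illustrative sketch of the caller-side contract; the real loop lives in the core low-latency-poll code, not in this driver:

    #include "bnx2x_cmn.h"

    /* Illustrative only: poll the driver a bounded number of times under BH
     * protection; a positive return means packets were delivered, a non-zero
     * LL_FLUSH_* code means give up and fall back to the normal NAPI path.
     */
    static int demo_busy_poll_once(struct napi_struct *napi)
    {
            unsigned int tries = 16;        /* arbitrary bound for the sketch */
            int rc;

            local_bh_disable();             /* the callback requires BH disabled */
            do {
                    rc = bnx2x_low_latency_recv(napi);
            } while (rc == 0 && --tries);   /* nothing yet, try again */
            local_bh_enable();

            return rc;
    }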
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
@@ -3192,11 +3239,11 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
                rc |= XMIT_CSUM_TCP;
 
        if (skb_is_gso_v6(skb)) {
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
                if (rc & XMIT_CSUM_ENC)
                        rc |= XMIT_GSO_ENC_V6;
        } else if (skb_is_gso(skb)) {
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+               rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
                if (rc & XMIT_CSUM_ENC)
                        rc |= XMIT_GSO_ENC_V4;
        }
@@ -3313,6 +3360,7 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
  */
 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
                              struct eth_tx_parse_bd_e1x *pbd,
+                             struct eth_tx_start_bd *tx_start_bd,
                              u32 xmit_type)
 {
        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3326,11 +3374,14 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
                                                   ip_hdr(skb)->daddr,
                                                   0, IPPROTO_TCP, 0));
 
-       } else
+               /* GSO on 57710/57711 needs FW to calculate IP checksum */
+               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+       } else {
                pbd->tcp_pseudo_csum =
                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr,
                                                 0, IPPROTO_TCP, 0));
+       }
 
        pbd->global_data |=
                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
@@ -3479,23 +3530,25 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 {
        u16 hlen_w = 0;
        u8 outerip_off, outerip_len = 0;
+
        /* from outer IP to transport */
        hlen_w = (skb_inner_transport_header(skb) -
                  skb_network_header(skb)) >> 1;
 
        /* transport len */
-       if (xmit_type & XMIT_CSUM_TCP)
-               hlen_w += inner_tcp_hdrlen(skb) >> 1;
-       else
-               hlen_w += sizeof(struct udphdr) >> 1;
+       hlen_w += inner_tcp_hdrlen(skb) >> 1;
 
        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
 
-       if (xmit_type & XMIT_CSUM_ENC_V4) {
+       /* outer IP header info */
+       if (xmit_type & XMIT_CSUM_V4) {
                struct iphdr *iph = ip_hdr(skb);
+               u16 csum = (__force u16)(~iph->check) -
+                          (__force u16)iph->tot_len -
+                          (__force u16)iph->frag_off;
+
                pbd2->fw_ip_csum_wo_len_flags_frag =
-                       bswab16(csum_fold((~iph->check) -
-                                         iph->tot_len - iph->frag_off));
+                       bswab16(csum_fold((__force __wsum)csum));
        } else {
                pbd2->fw_ip_hdr_to_payload_w =
                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
@@ -3583,7 +3636,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
           txq_index, fp_index, txdata_index); */
 
-       /* enable this debug print to view the tranmission details
+       /* enable this debug print to view the transmission details
        DP(NETIF_MSG_TX_QUEUED,
           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
           txdata->cid, fp_index, txdata_index, txdata, fp); */
@@ -3814,7 +3867,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
                                             xmit_type);
                else
-                       bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+                       bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
        }
 
        /* Set the PBD's parsing_data field if not zero
@@ -3965,7 +4018,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
        /* setup tc must be called under rtnl lock */
        ASSERT_RTNL();
 
-       /* no traffic classes requested. aborting */
+       /* no traffic classes requested. Aborting */
        if (!num_tc) {
                netdev_reset_tc(dev);
                return 0;
@@ -3973,7 +4026,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 
        /* requested to support too many traffic classes */
        if (num_tc > bp->max_cos) {
-               BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
+               BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
                          num_tc, bp->max_cos);
                return -EINVAL;
        }
@@ -3992,8 +4045,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
                   prio, bp->prio_to_cos[prio]);
        }
 
-
-       /* Use this configuration to diffrentiate tc0 from other COSes
+       /* Use this configuration to differentiate tc0 from other COSes
           This can be used for ets or pfc, and save the effort of setting
           up a multio class queue disc or negotiating DCBX with a switch
        netdev_set_prio_tc_map(dev, 0, 0);
@@ -4285,10 +4337,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
                                &bnx2x_fp(bp, index, rx_desc_mapping),
                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
 
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
-                               &bnx2x_fp(bp, index, rx_comp_mapping),
-                               sizeof(struct eth_fast_path_rx_cqe) *
-                               NUM_RCQ_BD);
+               /* Seed all CQEs by 1s */
+               BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+                                &bnx2x_fp(bp, index, rx_comp_mapping),
+                                sizeof(struct eth_fast_path_rx_cqe) *
+                                NUM_RCQ_BD);
 
                /* SGE ring */
                BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
@@ -4469,7 +4522,6 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
 alloc_err:
        bnx2x_free_mem_bp(bp);
        return -ENOMEM;
-
 }
 
 int bnx2x_reload_if_running(struct net_device *dev)
@@ -4511,7 +4563,6 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
        }
 
        return sel_phy_idx;
-
 }
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
 {
@@ -4599,6 +4650,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 flags = bp->flags;
+       u32 changes;
        bool bnx2x_reload = false;
 
        if (features & NETIF_F_LRO)
@@ -4623,10 +4675,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
                }
        }
 
-       if (flags ^ bp->flags) {
-               bp->flags = flags;
+       changes = flags ^ bp->flags;
+
+       /* if GRO is changed while LRO is enabled, don't force a reload */
+       if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
+               changes &= ~GRO_ENABLE_FLAG;
+
+       if (changes)
                bnx2x_reload = true;
-       }
+
+       bp->flags = flags;
 
        if (bnx2x_reload) {
                if (bp->recovery_state == BNX2X_RECOVERY_DONE)
@@ -4721,7 +4779,6 @@ int bnx2x_resume(struct pci_dev *pdev)
        return rc;
 }
 
-
 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid)
 {
@@ -4739,7 +4796,6 @@ static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
                                    u8 fw_sb_id, u8 sb_index,
                                    u8 ticks)
 {
-
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
        REG_WR8(bp, addr, ticks);
index 151675d66b0d4d1f5b80289bb8697c43c69fb301..c07a6d054cfe970b031582400ef71212bd45780c 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
-
 #include "bnx2x.h"
 #include "bnx2x_sriov.h"
 
@@ -50,13 +49,25 @@ extern int int_mode;
                } \
        } while (0)
 
-#define BNX2X_PCI_ALLOC(x, y, size)                            \
-do {                                                           \
-       x = dma_alloc_coherent(&bp->pdev->dev, size, y,         \
-                              GFP_KERNEL | __GFP_ZERO);        \
-       if (x == NULL)                                          \
-               goto alloc_mem_err;                             \
-} while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+       do { \
+               x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+                                      GFP_KERNEL | __GFP_ZERO); \
+               if (x == NULL) \
+                       goto alloc_mem_err; \
+               DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+                  (unsigned long long)(*y), x); \
+       } while (0)
+
+#define BNX2X_PCI_FALLOC(x, y, size) \
+       do { \
+               x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+               if (x == NULL) \
+                       goto alloc_mem_err; \
+               memset((void *)x, 0xFFFFFFFF, size); \
+               DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
+                  (unsigned long long)(*y), x); \
+       } while (0)
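BNX2X_PCI_FALLOC is the new "fill" variant used for the RX completion ring: the coherent buffer is pre-set to 0xff so every CQE begins life in the seeded, not-yet-completed state (see BNX2X_SEED_CQE below). Since memset() only uses the least significant byte of its value argument, the 0xFFFFFFFF constant behaves exactly like 0xff here. Expanded roughly at its rx_comp_ring call site from earlier in this patch, it amounts to:

    bnx2x_fp(bp, index, rx_comp_ring) =
            dma_alloc_coherent(&bp->pdev->dev,
                               sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD,
                               &bnx2x_fp(bp, index, rx_comp_mapping), GFP_KERNEL);
    if (bnx2x_fp(bp, index, rx_comp_ring) == NULL)
            goto alloc_mem_err;
    /* all bytes 0xff => every CQE marker is non-zero => "not completed yet" */
    memset(bnx2x_fp(bp, index, rx_comp_ring), 0xff,
           sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);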
 
 #define BNX2X_ALLOC(x, size) \
        do { \
@@ -494,9 +505,6 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
 /* Error handling */
 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 
-/* validate currect fw is loaded */
-bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
-
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
 
@@ -606,6 +614,13 @@ int bnx2x_enable_msi(struct bnx2x *bp);
  */
 int bnx2x_poll(struct napi_struct *napi, int budget);
 
+/**
+ * bnx2x_low_latency_recv - LL callback
+ *
+ * @napi:      napi structure
+ */
+int bnx2x_low_latency_recv(struct napi_struct *napi);
+
 /**
  * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure
  *
@@ -800,16 +815,18 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
        return false;
 }
 
+#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
+#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
 {
-       u16 rx_cons_sb;
+       u16 cons;
+       union eth_rx_cqe *cqe;
+       struct eth_fast_path_rx_cqe *cqe_fp;
 
-       /* Tell compiler that status block fields can change */
-       barrier();
-       rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-       if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-               rx_cons_sb++;
-       return (fp->rx_comp_cons != rx_cons_sb);
+       cons = RCQ_BD(fp->rx_comp_cons);
+       cqe = &fp->rx_comp_ring[cons];
+       cqe_fp = &cqe->fast_path_cqe;
+       return BNX2X_IS_CQE_COMPLETED(cqe_fp);
 }
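Together with the seeded allocation (BNX2X_PCI_FALLOC) and the BNX2X_SEED_CQE() call in bnx2x_rx_int(), these macros replace the old scheme of comparing the software consumer index against a hw_comp_cons value read, behind an rmb(), from the status block: a CQE whose marker has been overwritten to zero by the hardware is complete, and the driver re-seeds it to 0xFFFFFFFF once processed, so completion detection only ever reads the CQE itself. A generic sketch of the idiom, with illustrative names rather than the driver's structures:

    #include <linux/types.h>

    struct demo_cqe {
            u32 payload;
            u32 marker;     /* 0xFFFFFFFF while the slot is free; the device's
                             * completion write leaves this field zero
                             */
    };

    static bool demo_cqe_completed(const struct demo_cqe *cqe)
    {
            return cqe->marker == 0;        /* written by the device? */
    }

    static void demo_cqe_reseed(struct demo_cqe *cqe)
    {
            cqe->marker = 0xFFFFFFFF;       /* hand the slot back */
    }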
 
 /**
@@ -848,9 +865,11 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
        int i;
 
        /* Add NAPI objects */
-       for_each_rx_queue_cnic(bp, i)
+       for_each_rx_queue_cnic(bp, i) {
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, NAPI_POLL_WEIGHT);
+               napi_hash_add(&bnx2x_fp(bp, i, napi));
+       }
 }
 
 static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -858,25 +877,31 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
        int i;
 
        /* Add NAPI objects */
-       for_each_eth_queue(bp, i)
+       for_each_eth_queue(bp, i) {
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, NAPI_POLL_WEIGHT);
+               napi_hash_add(&bnx2x_fp(bp, i, napi));
+       }
 }
 
 static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
 {
        int i;
 
-       for_each_rx_queue_cnic(bp, i)
+       for_each_rx_queue_cnic(bp, i) {
+               napi_hash_del(&bnx2x_fp(bp, i, napi));
                netif_napi_del(&bnx2x_fp(bp, i, napi));
+       }
 }
 
 static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
        int i;
 
-       for_each_eth_queue(bp, i)
+       for_each_eth_queue(bp, i) {
+               napi_hash_del(&bnx2x_fp(bp, i, napi));
                netif_napi_del(&bnx2x_fp(bp, i, napi));
+       }
 }
 
 int bnx2x_set_int_mode(struct bnx2x *bp);
@@ -1171,7 +1196,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
 
 static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
 {
-
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
 }
@@ -1181,7 +1205,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
        return bp->igu_base_sb;
 }
 
-
 static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 {
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
@@ -1334,8 +1357,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
 
        /*
-        * 1. number of frags should not grow above MAX_SKB_FRAGS
-        * 2. frag must fit the page
+        * 1. Number of frags should not grow above MAX_SKB_FRAGS
+        * 2. Frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
index 4b077a7f16af11d1bc3e66bc544044d428a4d8ad..0c94df47e0e8ee46d273488413be3d56d9c29ef6 100644 (file)
@@ -253,7 +253,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
 
        memset(&pg_help_data, 0, sizeof(struct pg_help_data));
 
-
        if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
                DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
 
@@ -298,7 +297,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
 static void  bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
                                        struct dcbx_pfc_feature *pfc, u32 error)
 {
-
        if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
                DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
 
@@ -367,7 +365,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
        struct lldp_remote_mib *remote_mib ;
        struct lldp_local_mib  *local_mib;
 
-
        switch (read_mib_type) {
        case DCBX_READ_LOCAL_MIB:
                mib_size = sizeof(struct lldp_local_mib);
@@ -629,7 +626,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
        return 0;
 }
 
-
 #ifdef BCM_DCBNL
 static inline
 u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
@@ -691,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
        }
 
        /* setup tc must be called under rtnl lock, but we can't take it here
-        * as we are handling an attetntion on a work queue which must be
+        * as we are handling an attention on a work queue which must be
         * flushed at some rtnl-locked contexts (e.g. if down)
         */
        if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
@@ -711,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                         */
                        bnx2x_dcbnl_update_applist(bp, true);
 
-                       /* Read rmeote mib if dcbx is in the FW */
+                       /* Read remote mib if dcbx is in the FW */
                        if (bnx2x_dcbx_read_shmem_remote_mib(bp))
                                return;
 #endif
@@ -742,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                        bnx2x_dcbx_update_tc_mapping(bp);
 
                        /*
-                        * allow other funtions to update their netdevices
+                        * allow other functions to update their netdevices
                         * accordingly
                         */
                        if (IS_MF(bp))
@@ -864,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
                           i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
                }
 
-               /*For IEEE admin_recommendation_bw_precentage
+               /*For IEEE admin_recommendation_bw_percentage
                 *For IEEE admin_recommendation_ets_pg */
                af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
                for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
@@ -896,13 +892,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
                }
 
                af->app.default_pri = (u8)dp->admin_default_priority;
-
        }
 
        /* Write the data. */
        bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
                         sizeof(struct lldp_admin_mib));
-
 }
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
@@ -1076,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
        bool pg_found  = false;
        u32 i, traf_type, add_traf_type, add_pg;
        u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
-       struct pg_entry_help_data *data = help_data->data; /*shotcut*/
+       struct pg_entry_help_data *data = help_data->data; /*shortcut*/
 
        /* Set to invalid */
        for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
@@ -1172,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
                                DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
                else
                        /* If we join a group and one is strict
-                        * than the bw rulls */
+                        * than the bw rules
+                        */
                        cos_data->data[entry].strict =
                                                BNX2X_DCBX_STRICT_COS_HIGHEST;
        }
@@ -1181,7 +1176,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
                BNX2X_ERR("dcbx error: Both groups must have priorities\n");
 }
 
-
 #ifndef POWER_OF_2
 #define POWER_OF_2(x)  ((0 != x) && (0 == (x & (x-1))))
 #endif
@@ -1284,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
                } else {
                        /* If there are only pauseable priorities or
                         * only non-pauseable,* the lower priorities go
-                        * to the first queue and the higherpriorities go
+                        * to the first queue and the higher priorities go
                         * to the second queue.
                         */
                        cos_data->data[0].pausable =
@@ -1484,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
                 * queue and one priority goes to the second queue.
                 *
                 * We will join this two cases:
-                * if one is BW limited it will go to the secoend queue
+                * if one is BW limited it will go to the second queue
                 * otherwise the last priority will get it
                 */
 
@@ -1504,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
                                    false == b_found_strict)
                                        /* last entry will be handled separately
                                         * If no priority is strict than last
-                                        * enty goes to last queue.*/
+                                        * entry goes to last queue.
+                                        */
                                        entry = 1;
                                cos_data->data[entry].pri_join_mask |=
                                                                pri_tested;
@@ -1516,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
                                b_found_strict = true;
                                cos_data->data[1].pri_join_mask |= pri_tested;
                                /* If we join a group and one is strict
-                                * than the bw rulls */
+                                * than the bw rules
+                                */
                                cos_data->data[1].strict =
                                        BNX2X_DCBX_STRICT_COS_HIGHEST;
                        }
@@ -1524,7 +1520,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
        }
 }
 
-
 static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
                                       struct pg_help_data *help_data,
                                       struct dcbx_ets_feature *ets,
@@ -1533,7 +1528,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
                                       u32 pri_join_mask,
                                       u8 num_of_dif_pri)
 {
-
        /* default E2 settings */
        cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
 
@@ -1629,7 +1623,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
                                         u8 num_spread_of_entries,
                                         u8 strict_app_pris)
 {
-
        if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
                                         num_spread_of_entries,
                                         strict_app_pris)) {
@@ -1848,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
-       /* if we need to syncronize DCBX result from prev PMF
+       /* if we need to synchronize DCBX result from prev PMF
         * read it from shmem and update bp and netdev accordingly
         */
        if (SHMEM2_HAS(bp, drv_flags) &&
@@ -1876,7 +1869,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
                 * dcbx negotiation.
                 */
                bnx2x_dcbx_update_tc_mapping(bp);
-
        }
 }
 
@@ -1943,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
                return;
 
        /**
-        * bw_pct ingnored -    band-width percentage devision between user
+        * bw_pct ignored -     band-width percentage devision between user
         *                      priorities within the same group is not
         *                      standard and hence not supported
         *
-        * prio_type igonred -  priority levels within the same group are not
+        * prio_type ignored -  priority levels within the same group are not
         *                      standard and hence are not supported. According
         *                      to the standard pgid 15 is dedicated to strict
-        *                      prioirty traffic (on the port level).
+        *                      priority traffic (on the port level).
         *
         * up_map ignored
         */
@@ -1995,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
        DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
 
        /**
-        * bw_pct ingnored -    band-width percentage devision between user
+        * bw_pct ignored -     band-width percentage devision between user
         *                      priorities within the same group is not
         *                      standard and hence not supported
         *
-        * prio_type igonred -  priority levels within the same group are not
+        * prio_type ignored -  priority levels within the same group are not
         *                      standard and hence are not supported. According
         *                      to the standard pgid 15 is dedicated to strict
-        *                      prioirty traffic (on the port level).
+        *                      priority traffic (on the port level).
         *
         * up_map ignored
         */
@@ -2389,7 +2381,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                default:
-                       BNX2X_ERR("Non valid featrue-ID\n");
+                       BNX2X_ERR("Non valid feature-ID\n");
                        rval = 1;
                        break;
                }
@@ -2430,7 +2422,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
                                flags & DCB_FEATCFG_WILLING ? 1 : 0;
                        break;
                default:
-                       BNX2X_ERR("Non valid featrue-ID\n");
+                       BNX2X_ERR("Non valid feature-ID\n");
                        rval = 1;
                        break;
                }
index d153f44cf8f99540c3988a1dd76ca777c10d5fcd..125bd1b6586ffc1f96b5fc946a4ee5a4613ce5a4 100644 (file)
@@ -134,8 +134,6 @@ enum {
 #define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD                   130
 #define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD                  170
 
-
-
 struct cos_entry_help_data {
        u32                     pri_join_mask;
        u32                     cos_bw;
@@ -170,7 +168,6 @@ struct cos_help_data {
                        (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
                         IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
 
-
 struct pg_entry_help_data {
        u8      num_of_dif_pri;
        u8      pg;
index bff5e33eaa1496bc6d9b93d58b2cfc4ef40fbbd6..12eb4baee9f642e444bbb162d038911776b4d852 100644 (file)
  * consent.
  */
 
-
-/* This struct holds a signature to ensure the dump returned from the driver
- * match the meta data file inserted to grc_dump.tcl
- * The signature is time stamp, diag version and grc_dump version
- */
-
 #ifndef BNX2X_DUMP_H
 #define BNX2X_DUMP_H
 
@@ -28,7 +22,6 @@
 #define DRV_DUMP_USTORM_WAITP_ADDRESS    0x338a80
 #define DRV_DUMP_CSTORM_WAITP_ADDRESS    0x238a80
 
-
 /* Possible Chips */
 #define DUMP_CHIP_E1 1
 #define DUMP_CHIP_E1H 2
index ce1a91618677c13e232f4b017485722857e35520..b8c067d1a0f2948a5efa66ea597ecea0b1145865 100644 (file)
@@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        speed = ethtool_cmd_speed(cmd);
 
-       /* If recieved a request for an unknown duplex, assume full*/
+       /* If received a request for an unknown duplex, assume full*/
        if (cmd->duplex == DUPLEX_UNKNOWN)
                cmd->duplex = DUPLEX_FULL;
 
@@ -733,7 +733,6 @@ static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
                return false;
 }
 
-
 static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
        const struct wreg_addr *wreg_info)
 {
@@ -850,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
 
        /* Paged registers are supported in E2 & E3 only */
        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
-               /* Read "paged" registes */
+               /* Read "paged" registers */
                bnx2x_read_pages_regs(bp, p, preset);
        }
 
@@ -1155,8 +1154,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
        return bp->common.flash_size;
 }
 
-/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had
- * we done things the other way around, if two pfs from the same port would
+/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
+ * had we done things the other way around, if two pfs from the same port would
  * attempt to access nvram at the same time, we could run into a scenario such
  * as:
  * pf A takes the port lock.
@@ -1381,12 +1380,29 @@ static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
        return rc;
 }
 
+static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
+{
+       int rc = 1;
+       u16 pm = 0;
+       struct net_device *dev = pci_get_drvdata(bp->pdev);
+
+       if (bp->pm_cap)
+               rc = pci_read_config_word(bp->pdev,
+                                         bp->pm_cap + PCI_PM_CTRL, &pm);
+
+       if ((rc && !netif_running(dev)) ||
+           (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
+               return false;
+
+       return true;
+}
+
 static int bnx2x_get_eeprom(struct net_device *dev,
                            struct ethtool_eeprom *eeprom, u8 *eebuf)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
-       if (!netif_running(dev)) {
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL  | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return -EAGAIN;
@@ -1411,7 +1427,7 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
        u8 *user_data = data;
        unsigned int start_addr = ee->offset, xfer_size = 0;
 
-       if (!netif_running(dev)) {
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return -EAGAIN;
@@ -1474,7 +1490,7 @@ static int bnx2x_get_module_info(struct net_device *dev,
        int phy_idx, rc;
        u8 sff8472_comp, diag_type;
 
-       if (!netif_running(dev)) {
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return -EAGAIN;
@@ -1594,8 +1610,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
                 */
                val = be32_to_cpu(val_be);
 
-               val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset));
-               val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset));
+               val &= ~le32_to_cpu((__force __le32)
+                                   (0xff << BYTE_OFFSET(offset)));
+               val |= le32_to_cpu((__force __le32)
+                                  (*data_buf << BYTE_OFFSET(offset)));
 
                rc = bnx2x_nvram_write_dword(bp, align_offset, val,
                                             cmd_flags);
@@ -1676,7 +1694,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
        int port = BP_PORT(bp);
        int rc = 0;
        u32 ext_phy_config;
-       if (!netif_running(dev)) {
+
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return -EAGAIN;
@@ -1921,6 +1940,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
        "link_test (online)         "
 };
 
+enum {
+       BNX2X_PRI_FLAG_ISCSI,
+       BNX2X_PRI_FLAG_FCOE,
+       BNX2X_PRI_FLAG_STORAGE,
+       BNX2X_PRI_FLAG_LEN,
+};
+
+static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+       "iSCSI offload support",
+       "FCoE offload support",
+       "Storage only interface"
+};
+
 static u32 bnx2x_eee_to_adv(u32 eee_adv)
 {
        u32 modes = 0;
@@ -2041,7 +2073,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
                                    EEE_MODE_OVERRIDE_NVRAM |
                                    EEE_MODE_OUTPUT_TIME;
 
-       /* Restart link to propogate changes */
+       /* Restart link to propagate changes */
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
                bnx2x_force_link_reset(bp);
@@ -2160,7 +2192,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
                { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
        };
 
-       if (!netif_running(bp->dev)) {
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return rc;
@@ -2264,7 +2296,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
                { NULL, 0xffffffff, {0, 0, 0, 0} }
        };
 
-       if (!netif_running(bp->dev)) {
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return rc;
@@ -2978,32 +3010,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp)
 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       int i, num_stats;
+       int i, num_strings = 0;
 
        switch (stringset) {
        case ETH_SS_STATS:
                if (is_multi(bp)) {
-                       num_stats = bnx2x_num_stat_queues(bp) *
-                                               BNX2X_NUM_Q_STATS;
+                       num_strings = bnx2x_num_stat_queues(bp) *
+                                     BNX2X_NUM_Q_STATS;
                } else
-                       num_stats = 0;
+                       num_strings = 0;
                if (IS_MF_MODE_STAT(bp)) {
                        for (i = 0; i < BNX2X_NUM_STATS; i++)
                                if (IS_FUNC_STAT(i))
-                                       num_stats++;
+                                       num_strings++;
                } else
-                       num_stats += BNX2X_NUM_STATS;
+                       num_strings += BNX2X_NUM_STATS;
 
-               return num_stats;
+               return num_strings;
 
        case ETH_SS_TEST:
                return BNX2X_NUM_TESTS(bp);
 
+       case ETH_SS_PRIV_FLAGS:
+               return BNX2X_PRI_FLAG_LEN;
+
        default:
                return -EINVAL;
        }
 }
 
+static u32 bnx2x_get_private_flags(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       u32 flags = 0;
+
+       flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
+       flags |= (!(bp->flags & NO_FCOE_FLAG)  ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
+       flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
+
+       return flags;
+}
+
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -3026,7 +3073,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        }
                }
 
-
                for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
                        if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
                                continue;
@@ -3045,6 +3091,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        start = 4;
                memcpy(buf, bnx2x_tests_str_arr + start,
                       ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+               break;
+
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(buf, bnx2x_private_arr,
+                      ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+               break;
        }
 }
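The new ETH_SS_PRIV_FLAGS support pairs each bit returned by bnx2x_get_private_flags() with the label at the same index in bnx2x_private_arr[], which is what "ethtool --show-priv-flags" reports. A minimal standalone sketch of that bit/label pairing (flag values are made up; this is not the driver code):

/* Standalone sketch of how private-flag bits line up with the
 * string array exported through ETH_SS_PRIV_FLAGS.
 */
#include <stdio.h>
#include <stdint.h>

enum { PRI_FLAG_ISCSI, PRI_FLAG_FCOE, PRI_FLAG_STORAGE, PRI_FLAG_LEN };

static const char *private_arr[PRI_FLAG_LEN] = {
	"iSCSI offload support",
	"FCoE offload support",
	"Storage only interface"
};

static void show_priv_flags(uint32_t flags)
{
	for (int i = 0; i < PRI_FLAG_LEN; i++)
		printf("%s: %s\n", private_arr[i],
		       (flags & (1u << i)) ? "on" : "off");
}

int main(void)
{
	uint32_t flags = 1u << PRI_FLAG_ISCSI;	/* pretend only iSCSI offload is on */

	show_priv_flags(flags);
	return 0;
}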
 
@@ -3106,17 +3158,12 @@ static int bnx2x_set_phys_id(struct net_device *dev,
 {
        struct bnx2x *bp = netdev_priv(dev);
 
-       if (!netif_running(dev)) {
+       if (!bnx2x_is_nvm_accessible(bp)) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot access eeprom when the interface is down\n");
                return -EAGAIN;
        }
 
-       if (!bp->port.pmf) {
-               DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
-               return -EOPNOTSUPP;
-       }
-
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* cycle on/off once per second */
@@ -3148,7 +3195,6 @@ static int bnx2x_set_phys_id(struct net_device *dev,
 
 static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 {
-
        switch (info->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
@@ -3384,7 +3430,6 @@ static int bnx2x_set_channels(struct net_device *dev,
 {
        struct bnx2x *bp = netdev_priv(dev);
 
-
        DP(BNX2X_MSG_ETHTOOL,
           "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
           channels->rx_count, channels->tx_count, channels->other_count,
@@ -3445,6 +3490,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
        .set_pauseparam         = bnx2x_set_pauseparam,
        .self_test              = bnx2x_self_test,
        .get_sset_count         = bnx2x_get_sset_count,
+       .get_priv_flags         = bnx2x_get_private_flags,
        .get_strings            = bnx2x_get_strings,
        .set_phys_id            = bnx2x_set_phys_id,
        .get_ethtool_stats      = bnx2x_get_ethtool_stats,
index 12f00a40cdf0e8c280f404d663378729f9cf3975..5018e52ae2ad8fac5a194b0f137b66d5b8aaf0b4 100644 (file)
@@ -1323,6 +1323,8 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET     0x00000002
 
        #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA          0x0000100a
+       #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA         0x00002000
+
        u32 fw_mb_header;
        #define FW_MSG_CODE_MASK                        0xffff0000
        #define FW_MSG_CODE_DRV_LOAD_COMMON             0x10100000
@@ -3816,7 +3818,8 @@ struct eth_fast_path_rx_cqe {
        __le16 len_on_bd;
        struct parsing_flags pars_flags;
        union eth_sgl_or_raw_data sgl_or_raw_data;
-       __le32 reserved1[8];
+       __le32 reserved1[7];
+       u32 marker;
 };
 
 
index b4c9dea93a5362ecfa899c6b3a247d6432fcb4fd..740518bbcb5ffa159c976d93d9774be6c2aa9733 100644 (file)
@@ -93,7 +93,6 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
-
 int num_queues;
 module_param(num_queues, int, 0);
 MODULE_PARM_DESC(num_queues,
@@ -103,8 +102,6 @@ static int disable_tpa;
 module_param(disable_tpa, int, 0);
 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
-#define INT_MODE_INTx                  1
-#define INT_MODE_MSI                   2
 int int_mode;
 module_param(int_mode, int, 0);
 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
@@ -122,8 +119,6 @@ static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, " Default debug msglevel");
 
-
-
 struct workqueue_struct *bnx2x_wq;
 
 struct bnx2x_mac_vals {
@@ -376,9 +371,11 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 #define DMAE_DP_DST_PCI                "pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE       "dst_addr [none]"
 
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp,
+                         struct dmae_command *dmae, int msglvl)
 {
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+       int i;
 
        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
@@ -434,6 +431,10 @@ void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
                           dmae->comp_val);
                break;
        }
+
+       for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
+               DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
+                  i, *(((u32 *)dmae) + i));
 }
 
 /* copy command into DMAE command memory and set DMAE command go */
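bnx2x_dp_dmae() now also dumps the whole command structure as raw dwords after the formatted output. A standalone sketch of that raw-dump loop, using a made-up structure instead of the real struct dmae_command:

/* Standalone sketch of the "dump a command structure as raw dwords"
 * loop added to bnx2x_dp_dmae(); the struct below is illustrative,
 * not the real struct dmae_command layout.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_cmd {
	uint32_t opcode;
	uint32_t src_addr_lo;
	uint32_t src_addr_hi;
	uint32_t len;
};

static void dump_raw(const void *cmd, size_t size)
{
	const uint32_t *p = cmd;

	for (size_t i = 0; i < size / 4; i++)
		printf("RAW [%02zu]: 0x%08x\n", i, p[i]);
}

int main(void)
{
	struct fake_cmd cmd = { 0x00c40065, 0x12340000, 0x0, 0x40 };

	dump_raw(&cmd, sizeof(cmd));
	return 0;
}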
@@ -508,8 +509,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;
 
-       /*
-        * Lock the dmae channel. Disable BHs to prevent a dead-lock
+       bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
+
+       /* Lock the dmae channel. Disable BHs to prevent a dead-lock
         * as long as this code is called both from syscall context and
         * from ndo_set_rx_mode() flow that may be called from BH.
         */
@@ -548,6 +550,7 @@ unlock:
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
 {
+       int rc;
        struct dmae_command dmae;
 
        if (!bp->dmae_ready) {
@@ -571,11 +574,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       if (rc) {
+               BNX2X_ERR("DMAE returned failure %d\n", rc);
+               bnx2x_panic();
+       }
 }
 
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 {
+       int rc;
        struct dmae_command dmae;
 
        if (!bp->dmae_ready) {
@@ -603,7 +611,11 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       if (rc) {
+               BNX2X_ERR("DMAE returned failure %d\n", rc);
+               bnx2x_panic();
+       }
 }
 
 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -811,8 +823,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
        u32 val = REG_RD(bp, addr);
 
        /* in E1 we must use only PCI configuration space to disable
-        * MSI/MSIX capablility
-        * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
+        * MSI/MSIX capability
+        * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
@@ -839,7 +851,7 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
 
        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
-               BNX2X_ERR("BUG! proper val not read from IGU!\n");
+               BNX2X_ERR("BUG! Proper val not read from IGU!\n");
 }
 
 static void bnx2x_igu_int_disable(struct bnx2x *bp)
@@ -857,7 +869,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
 
        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
-               BNX2X_ERR("BUG! proper val not read from IGU!\n");
+               BNX2X_ERR("BUG! Proper val not read from IGU!\n");
 }
 
 static void bnx2x_int_disable(struct bnx2x *bp)
@@ -917,7 +929,6 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
               sp_sb_data.p_func.vf_valid,
               sp_sb_data.state);
 
-
        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
@@ -1016,7 +1027,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
                                hc_sm_p[j].timer_value);
                }
 
-               /* Indecies data */
+               /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
@@ -1027,6 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 #ifdef BNX2X_STOP_ON_ERROR
 
        /* event queue */
+       BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
        for (i = 0; i < NUM_EQ_DESC; i++) {
                u32 *data = (u32 *)&bp->eq_ring[i].message.data;
 
@@ -1111,7 +1123,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
  * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
  * initialization.
  */
-#define FLR_WAIT_USEC          10000   /* 10 miliseconds */
+#define FLR_WAIT_USEC          10000   /* 10 milliseconds */
 #define FLR_WAIT_INTERVAL      50      /* usec */
 #define        FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
 
@@ -1290,7 +1302,6 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
        for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
                bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
 
-
        /* Verify the transmission buffers are flushed P0, P1, P4 */
        for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
                bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
@@ -1305,11 +1316,9 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
 #define OP_GEN_AGG_VECT(index) \
        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
 
-
 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
 {
        u32 op_gen_command = 0;
-
        u32 comp_addr = BAR_CSTRORM_INTMEM +
                        CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
        int ret = 0;
@@ -1334,7 +1343,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
                bnx2x_panic();
                return 1;
        }
-       /* Zero completion for nxt FLR */
+       /* Zero completion for next FLR */
        REG_WR(bp, comp_addr, 0);
 
        return ret;
@@ -1352,7 +1361,6 @@ u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 */
 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
 {
-
        /* wait for CFC PF usage-counter to zero (includes all the VFs) */
        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                        CFC_REG_NUM_LCIDS_INSIDE_PF,
@@ -1360,7 +1368,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
                        poll_cnt))
                return 1;
 
-
        /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                        DORQ_REG_PF_USAGE_CNT,
@@ -1390,7 +1397,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
        /* Wait DMAE PF usage counter to zero */
        if (bnx2x_flr_clnup_poll_hw_counter(bp,
                        dmae_reg_go_c[INIT_DMAE_C(bp)],
-                       "DMAE dommand register timed out",
+                       "DMAE command register timed out",
                        poll_cnt))
                return 1;
 
@@ -1770,7 +1777,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                break;
 
        case (RAMROD_CMD_ID_ETH_TERMINATE):
-               DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
+               DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
                drv_cmd = BNX2X_Q_CMD_TERMINATE;
                break;
 
@@ -1859,7 +1866,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
                mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
                if (status & mask) {
                        /* Handle Rx or Tx according to SB id */
-                       prefetch(fp->rx_cons_sb);
                        for_each_cos_in_tx_queue(fp, cos)
                                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
                        prefetch(&fp->sb_running_index[SM_RX_ID]);
@@ -1947,7 +1953,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
                if (lock_status & resource_bit)
                        return 0;
 
-               msleep(5);
+               usleep_range(5000, 10000);
        }
        BNX2X_ERR("Timeout\n");
        return -EAGAIN;
@@ -1982,8 +1988,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
-               BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
-                  lock_status, resource_bit);
+               BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
+                         lock_status, resource_bit);
                return -EFAULT;
        }
 
@@ -1991,7 +1997,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
        return 0;
 }
 
-
 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
 {
        /* The GPIO should be swapped if swap register is set and active */
@@ -2347,14 +2352,13 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
        return rc;
 }
 
-
 /* Calculates the sum of vn_min_rates.
    It's needed for further normalizing of the min_rates.
    Returns:
      sum of vn_min_rates.
        or
      0 - if all the min_rates are 0.
-     In the later case fainess algorithm should be deactivated.
+     In the latter case the fairness algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will be set to 1.
  */
 static void bnx2x_calc_vn_min(struct bnx2x *bp,
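A standalone sketch of the min-rate accounting described in the comment above: if every vn_min_rate is zero the sum is 0 and fairness is deactivated, otherwise zero entries are bumped to 1 before summing. The rates below are made up and the logic is a simplification of the real bnx2x_calc_vn_min():

/* Standalone sketch of the vn_min_rates accounting described in the
 * comment above; illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_VNICS 4

static uint32_t calc_vn_min(uint32_t vn_min_rate[NUM_VNICS])
{
	uint32_t sum = 0;
	int all_zero = 1;
	int vn;

	for (vn = 0; vn < NUM_VNICS; vn++)
		if (vn_min_rate[vn])
			all_zero = 0;

	if (all_zero)
		return 0;	/* caller deactivates the fairness algorithm */

	for (vn = 0; vn < NUM_VNICS; vn++) {
		if (!vn_min_rate[vn])
			vn_min_rate[vn] = 1;	/* zero entries bumped to 1 */
		sum += vn_min_rate[vn];
	}

	return sum;
}

int main(void)
{
	uint32_t rates[NUM_VNICS] = { 0, 2500, 0, 5000 };

	printf("sum of vn_min_rates: %u\n", calc_vn_min(rates));
	return 0;
}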
@@ -2419,7 +2423,6 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
        input->vnic_max_rate[vn] = vn_max_rate;
 }
 
-
 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 {
        if (CHIP_REV_IS_SLOW(bp))
@@ -2435,7 +2438,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
        int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
 
        if (BP_NOMCP(bp))
-               return; /* what should be the default bvalue in this case */
+               return; /* what should be the default value in this case */
 
        /* For 2 port configuration the absolute function number formula
         * is:
@@ -2901,7 +2904,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
        return rc;
 }
 
-
 static void storm_memset_func_cfg(struct bnx2x *bp,
                                 struct tstorm_eth_function_common_config *tcfg,
                                 u16 abs_fid)
@@ -2935,7 +2937,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 }
 
 /**
- * bnx2x_get_tx_only_flags - Return common flags
+ * bnx2x_get_common_flags - Return common flags
  *
  * @bp         device handle
  * @fp         queue handle
@@ -3006,7 +3008,6 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
        if (IS_MF_AFEX(bp))
                __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
 
-
        return flags | bnx2x_get_common_flags(bp, fp, true);
 }
 
@@ -3082,7 +3083,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
         * placed on the BD (not including paddings).
         */
        rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
-               BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
+                          BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
 
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -3124,7 +3125,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
        txq_init->fw_sb_id = fp->fw_sb_id;
 
        /*
-        * set the tss leading client id for TX classfication ==
+        * set the tss leading client id for TX classification ==
         * leading RSS client id
         */
        txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
@@ -3196,7 +3197,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
        storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
 }
 
-
 static void bnx2x_e1h_disable(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
@@ -3212,7 +3212,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
 
        REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 
-       /* Tx queue should be only reenabled */
+       /* Tx queue should be only re-enabled */
        netif_tx_wake_all_queues(bp->dev);
 
        /*
@@ -3540,10 +3540,8 @@ static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
                return true;
        else
                return false;
-
 }
 
-
 /**
  * bnx2x_sp_post - place a single command on an SP ring
  *
@@ -3608,14 +3606,13 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
        /*
         * It's ok if the actual decrement is issued towards the memory
         * somewhere between the spin_lock and spin_unlock. Thus no
-        * more explict memory barrier is needed.
+        * more explicit memory barrier is needed.
         */
        if (common)
                atomic_dec(&bp->eq_spq_left);
        else
                atomic_dec(&bp->cq_spq_left);
 
-
        DP(BNX2X_MSG_SP,
           "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
           bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
@@ -3637,15 +3634,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
 
        might_sleep();
        for (j = 0; j < 1000; j++) {
-               val = (1UL << 31);
-               REG_WR(bp, GRCBASE_MCP + 0x9c, val);
-               val = REG_RD(bp, GRCBASE_MCP + 0x9c);
-               if (val & (1L << 31))
+               REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
+               val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
+               if (val & MCPR_ACCESS_LOCK_LOCK)
                        break;
 
-               msleep(5);
+               usleep_range(5000, 10000);
        }
-       if (!(val & (1L << 31))) {
+       if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
                BNX2X_ERR("Cannot acquire MCP access lock register\n");
                rc = -EBUSY;
        }
@@ -3656,7 +3652,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
 /* release split MCP access lock register */
 static void bnx2x_release_alr(struct bnx2x *bp)
 {
-       REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+       REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
 }
 
 #define BNX2X_DEF_SB_ATT_IDX   0x0001
@@ -3678,7 +3674,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
                rc |= BNX2X_DEF_SB_IDX;
        }
 
-       /* Do not reorder: indecies reading should complete before handling */
+       /* Do not reorder: indices reading should complete before handling */
        barrier();
        return rc;
 }
@@ -3827,8 +3823,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
        netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
                            "Please contact OEM Support for assistance\n");
 
-       /*
-        * Schedule device reset (unload)
+       /* Schedule device reset (unload)
         * This is due to some boards consuming sufficient power when driver is
         * up to overheat if fan fails.
         */
@@ -3836,7 +3831,6 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
        set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
        smp_mb__after_clear_bit();
        schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
 }
 
 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4106,7 +4100,7 @@ static void bnx2x_clear_reset_global(struct bnx2x *bp)
  */
 static bool bnx2x_reset_is_global(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
        DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
        return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
@@ -4157,7 +4151,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp)
  */
 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
 {
-       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+       u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
        u32 bit = engine ?
                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
 
@@ -4260,13 +4254,18 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
        return val != 0;
 }
 
+static void _print_parity(struct bnx2x *bp, u32 reg)
+{
+       pr_cont(" [0x%08x] ", REG_RD(bp, reg));
+}
+
 static void _print_next_block(int idx, const char *blk)
 {
        pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
-                                          bool print)
+static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+                                           int par_num, bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -4275,33 +4274,54 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "BRB");
+                                       _print_parity(bp,
+                                                     BRB1_REG_BRB1_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "PARSER");
+                                       _print_parity(bp, PRS_REG_PRS_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "TSDM");
+                                       _print_parity(bp,
+                                                     TSDM_REG_TSDM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++,
                                                          "SEARCHER");
+                                       _print_parity(bp, SRC_REG_SRC_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "TCM");
+                                       _print_parity(bp,
+                                                     TCM_REG_TCM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "TSEMI");
+                                       _print_parity(bp,
+                                                     TSEM_REG_TSEM_PRTY_STS_0);
+                                       _print_parity(bp,
+                                                     TSEM_REG_TSEM_PRTY_STS_1);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "XPB");
+                                       _print_parity(bp, GRCBASE_XPB +
+                                                         PB_REG_PB_PRTY_STS);
+                               }
                                break;
                        }
 
@@ -4313,8 +4333,9 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
        return par_num;
 }
 
-static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
-                                          bool *global, bool print)
+static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+                                           int par_num, bool *global,
+                                           bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -4323,37 +4344,66 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "PBF");
+                                       _print_parity(bp, PBF_REG_PBF_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "QM");
+                                       _print_parity(bp, QM_REG_QM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "TM");
+                                       _print_parity(bp, TM_REG_TM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "XSDM");
+                                       _print_parity(bp,
+                                                     XSDM_REG_XSDM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "XCM");
+                                       _print_parity(bp, XCM_REG_XCM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "XSEMI");
+                                       _print_parity(bp,
+                                                     XSEM_REG_XSEM_PRTY_STS_0);
+                                       _print_parity(bp,
+                                                     XSEM_REG_XSEM_PRTY_STS_1);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++,
                                                          "DOORBELLQ");
+                                       _print_parity(bp,
+                                                     DORQ_REG_DORQ_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "NIG");
+                                       if (CHIP_IS_E1x(bp)) {
+                                               _print_parity(bp,
+                                                       NIG_REG_NIG_PRTY_STS);
+                                       } else {
+                                               _print_parity(bp,
+                                                       NIG_REG_NIG_PRTY_STS_0);
+                                               _print_parity(bp,
+                                                       NIG_REG_NIG_PRTY_STS_1);
+                                       }
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                                if (print)
@@ -4362,32 +4412,52 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
                                *global = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "DEBUG");
+                                       _print_parity(bp, DBG_REG_DBG_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "USDM");
+                                       _print_parity(bp,
+                                                     USDM_REG_USDM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "UCM");
+                                       _print_parity(bp, UCM_REG_UCM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "USEMI");
+                                       _print_parity(bp,
+                                                     USEM_REG_USEM_PRTY_STS_0);
+                                       _print_parity(bp,
+                                                     USEM_REG_USEM_PRTY_STS_1);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "UPB");
+                                       _print_parity(bp, GRCBASE_UPB +
+                                                         PB_REG_PB_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "CSDM");
+                                       _print_parity(bp,
+                                                     CSDM_REG_CSDM_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "CCM");
+                                       _print_parity(bp, CCM_REG_CCM_PRTY_STS);
+                               }
                                break;
                        }
 
@@ -4399,8 +4469,8 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
        return par_num;
 }
 
-static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
-                                          bool print)
+static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+                                           int par_num, bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -4409,12 +4479,23 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "CSEMI");
+                                       _print_parity(bp,
+                                                     CSEM_REG_CSEM_PRTY_STS_0);
+                                       _print_parity(bp,
+                                                     CSEM_REG_CSEM_PRTY_STS_1);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "PXP");
+                                       _print_parity(bp, PXP_REG_PXP_PRTY_STS);
+                                       _print_parity(bp,
+                                                     PXP2_REG_PXP2_PRTY_STS_0);
+                                       _print_parity(bp,
+                                                     PXP2_REG_PXP2_PRTY_STS_1);
+                               }
                                break;
                        case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
                                if (print)
@@ -4422,24 +4503,42 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
                                        "PXPPCICLOCKCLIENT");
                                break;
                        case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "CFC");
+                                       _print_parity(bp,
+                                                     CFC_REG_CFC_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "CDU");
+                                       _print_parity(bp, CDU_REG_CDU_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "DMAE");
+                                       _print_parity(bp,
+                                                     DMAE_REG_DMAE_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "IGU");
+                                       if (CHIP_IS_E1x(bp))
+                                               _print_parity(bp,
+                                                       HC_REG_HC_PRTY_STS);
+                                       else
+                                               _print_parity(bp,
+                                                       IGU_REG_IGU_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "MISC");
+                                       _print_parity(bp,
+                                                     MISC_REG_MISC_PRTY_STS);
+                               }
                                break;
                        }
 
@@ -4493,8 +4592,8 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
        return par_num;
 }
 
-static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
-                                          bool print)
+static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+                                           int par_num, bool print)
 {
        int i = 0;
        u32 cur_bit = 0;
@@ -4503,12 +4602,18 @@ static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "PGLUE_B");
+                                       _print_parity(bp,
+                                               PGLUE_B_REG_PGLUE_B_PRTY_STS);
+                               }
                                break;
                        case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
-                               if (print)
+                               if (print) {
                                        _print_next_block(par_num++, "ATC");
+                                       _print_parity(bp,
+                                                     ATC_REG_ATC_PRTY_STS);
+                               }
                                break;
                        }
 
@@ -4539,15 +4644,15 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                if (print)
                        netdev_err(bp->dev,
                                   "Parity errors detected in blocks: ");
-               par_num = bnx2x_check_blocks_with_parity0(
+               par_num = bnx2x_check_blocks_with_parity0(bp,
                        sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity1(
+               par_num = bnx2x_check_blocks_with_parity1(bp,
                        sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity2(
+               par_num = bnx2x_check_blocks_with_parity2(bp,
                        sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
                par_num = bnx2x_check_blocks_with_parity3(
                        sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity4(
+               par_num = bnx2x_check_blocks_with_parity4(bp,
                        sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
 
                if (print)
@@ -4591,7 +4696,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
        return bnx2x_parity_attn(bp, global, print, attn.sig);
 }
 
-
 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
 {
        u32 val;
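The bnx2x_check_blocks_with_parity*() changes above all follow the same pattern: walk the asserted bits of an attention word, print the block name for each set bit, and now also dump that block's parity status register via _print_parity(). A standalone sketch of the pattern, with made-up block names and register values:

/* Standalone sketch of the parity-reporting pattern; the block names
 * and the simulated status values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static const char *block_name[32] = {
	[0] = "BRB", [1] = "PARSER", [2] = "TSDM", [3] = "SEARCHER",
};

static uint32_t read_parity_status(int bit)
{
	return 0xdead0000u | bit;	/* stand-in for REG_RD(bp, ...) */
}

static int report_parity(uint32_t sig, int par_num)
{
	for (int i = 0; i < 32; i++) {
		uint32_t cur_bit = 1u << i;

		if ((sig & cur_bit) && block_name[i]) {
			printf("%s%s [0x%08x]", par_num ? ", " : "",
			       block_name[i], read_parity_status(i));
			par_num++;
		}
	}
	printf("\n");
	return par_num;
}

int main(void)
{
	report_parity(0x5, 0);	/* bits 0 and 2 -> BRB, TSDM */
	return 0;
}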
@@ -4643,7 +4747,6 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
                (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
        }
-
 }
 
 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
@@ -4878,7 +4981,6 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
        else if (rc > 0)
                DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
-
 }
 
 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
@@ -5009,7 +5111,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
        hw_cons = le16_to_cpu(*bp->eq_cons_sb);
 
        /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
-        * when we get the the next-page we nned to adjust so the loop
+        * when we get the next-page we need to adjust so the loop
         * condition below will be met. The next element is the size of a
         * regular element and hence incrementing by 1
         */
@@ -5075,8 +5177,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                        if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
                                break;
 
-
-
                        goto next_spqe;
 
                case EVENT_RING_OPCODE_STOP_TRAFFIC:
@@ -5218,7 +5318,7 @@ static void bnx2x_sp_task(struct work_struct *work)
 
        DP(BNX2X_MSG_SP, "sp task invoked\n");
 
-       /* make sure the atomic interupt_occurred has been written */
+       /* make sure the atomic interrupt_occurred has been written */
        smp_rmb();
        if (atomic_read(&bp->interrupt_occurred)) {
 
@@ -5265,7 +5365,6 @@ static void bnx2x_sp_task(struct work_struct *work)
                /* ack status block only if something was actually handled */
                bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
                             le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
-
        }
 
        /* must be called after the EQ processing (since eq leads to sriov
@@ -5316,7 +5415,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 
 /* end of slow path */
 
-
 void bnx2x_drv_pulse(struct bnx2x *bp)
 {
        SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
@@ -5360,7 +5458,7 @@ static void bnx2x_timer(unsigned long data)
 
        /* sample pf vf bulletin board for new posts from pf */
        if (IS_VF(bp))
-               bnx2x_sample_bulletin(bp);
+               bnx2x_timer_sriov(bp);
 
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
@@ -5382,7 +5480,6 @@ static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
        else
                for (i = 0; i < len; i++)
                        REG_WR8(bp, addr + i, fill);
-
 }
 
 /* helper: writes FP SP data to FW - data_size in dwords */
@@ -5461,10 +5558,8 @@ static void bnx2x_zero_sp_sb(struct bnx2x *bp)
        bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
                        CSTORM_SP_SYNC_BLOCK_SIZE);
-
 }
 
-
 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
                                           int igu_sb_id, int igu_seg_id)
 {
@@ -5474,7 +5569,6 @@ static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
        hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
-
 /* allocates state machine ids. */
 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
 {
@@ -5700,7 +5794,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
        bp->eq_cons = 0;
        bp->eq_prod = NUM_EQ_DESC;
        bp->eq_cons_sb = BNX2X_EQ_INDEX;
-       /* we want a warning message before it gets rought... */
+       /* we want a warning message before it gets wrought... */
        atomic_set(&bp->eq_spq_left,
                min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
@@ -5784,7 +5878,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
 
                break;
        case BNX2X_RX_MODE_PROMISC:
-               /* According to deffinition of SI mode, iface in promisc mode
+               /* According to definition of SI mode, iface in promisc mode
                 * should receive matched and unmatched (in resolution of port)
                 * unicast packets.
                 */
@@ -5927,7 +6021,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
        /* init shortcut */
        fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
 
-       /* Setup SB indicies */
+       /* Setup SB indices */
        fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 
        /* Configure Queue State object */
@@ -5983,6 +6077,8 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
                                    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
        }
 
+       *txdata->tx_cons_sb = cpu_to_le16(0);
+
        SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
        txdata->tx_db.data.zero_fill1 = 0;
        txdata->tx_db.data.prod = 0;
@@ -6001,6 +6097,7 @@ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
        for_each_tx_queue_cnic(bp, i)
                bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
 }
+
 static void bnx2x_init_tx_rings(struct bnx2x *bp)
 {
        int i;
@@ -6043,11 +6140,6 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
        bnx2x_init_rx_rings(bp);
        bnx2x_init_tx_rings(bp);
 
-       if (IS_VF(bp)) {
-               bnx2x_memset_stats(bp);
-               return;
-       }
-
        if (IS_PF(bp)) {
                /* Initialize MOD_ABS interrupts */
                bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@ -6058,6 +6150,8 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
                bnx2x_init_def_sb(bp);
                bnx2x_update_dsb_idx(bp);
                bnx2x_init_sp_ring(bp);
+       } else {
+               bnx2x_memset_stats(bp);
        }
 }
 
@@ -6236,7 +6330,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
                if (val == 0x10)
                        break;
 
-               msleep(10);
+               usleep_range(10000, 20000);
                count--;
        }
        if (val != 0x10) {
@@ -6251,7 +6345,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
                if (val == 1)
                        break;
 
-               msleep(10);
+               usleep_range(10000, 20000);
                count--;
        }
        if (val != 0x1) {
@@ -6292,7 +6386,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
                if (val == 0xb0)
                        break;
 
-               msleep(10);
+               usleep_range(10000, 20000);
                count--;
        }
        if (val != 0xb0) {
@@ -6681,7 +6775,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
  *                 stay set)
  *             f.  If this is VNIC 3 of a port then also init
  *                 first_timers_ilt_entry to zero and last_timers_ilt_entry
- *                 to the last enrty in the ILT.
+ *                 to the last entry in the ILT.
  *
  *     Notes:
  *     Currently the PF error in the PGLC is non recoverable.
@@ -6772,7 +6866,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 
        bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
 
-
        /* QM queues pointers table */
        bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
 
@@ -7013,7 +7106,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        u32 low, high;
        u32 val;
 
-
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -7078,7 +7170,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                            BRB1_REG_MAC_GUARANTIED_1 :
                            BRB1_REG_MAC_GUARANTIED_0), 40);
 
-
        bnx2x_init_block(bp, BLOCK_PRS, init_phase);
        if (CHIP_IS_E3B0(bp)) {
                if (IS_MF_AFEX(bp)) {
@@ -7150,8 +7241,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
        bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
        /* init aeu_mask_attn_func_0/1:
-        *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
-        *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+        *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
+        *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
         *             bits 4-7 are used for "per vn group attention" */
        val = IS_MF(bp) ? 0xF7 : 0x7;
        /* Enable DCBX attention for all but E1 */
@@ -7275,7 +7366,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
        while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
                msleep(20);
 
-
        if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
                DP(NETIF_MSG_HW,
                   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
@@ -7295,7 +7385,6 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
                bnx2x_ilt_wr(bp, i, 0);
 }
 
-
 static void bnx2x_init_searcher(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
@@ -7331,7 +7420,6 @@ static int bnx2x_reset_nic_mode(struct bnx2x *bp)
        int rc, i, port = BP_PORT(bp);
        int vlan_en = 0, mac_en[NUM_MACS];
 
-
        /* Close input from network */
        if (bp->mf_mode == SINGLE_FUNCTION) {
                bnx2x_set_rx_filter(&bp->link_params, 0);
@@ -7406,7 +7494,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
        bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
 
        if (CONFIGURE_NIC_MODE(bp)) {
-               /* Configrue searcher as part of function hw init */
+               /* Configure searcher as part of function hw init */
                bnx2x_init_searcher(bp);
 
                /* Reset NIC mode */
@@ -7479,8 +7567,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        } else {
                /* Set NIC mode */
                REG_WR(bp, PRS_REG_NIC_MODE, 1);
-               DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
-
+               DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
        }
 
        if (!CHIP_IS_E1x(bp)) {
@@ -7677,7 +7764,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
                        }
                        bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
 
-                       /* !!! these should become driver const once
+                       /* !!! These should become driver const once
                           rf-tool supports split-68 const */
                        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
                        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
@@ -7734,7 +7821,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        return 0;
 }
 
-
 void bnx2x_free_mem_cnic(struct bnx2x *bp)
 {
        bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
@@ -7779,7 +7865,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
        bnx2x_iov_free_mem(bp);
 }
 
-
 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
 {
        if (!CHIP_IS_E1x(bp))
@@ -7793,7 +7878,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
                                       host_hc_status_block_e1x));
 
        if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
-               /* allocate searcher T2 table, as it wan't allocated before */
+               /* allocate searcher T2 table, as it wasn't allocated before */
                BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 
        /* write address to which L5 should insert its values */
@@ -8068,7 +8153,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
                   ilt_client->page_size,
                   ilt_client->flags,
                   ilog2(ilt_client->page_size >> 12));
-
        }
 
        if (CNIC_SUPPORT(bp)) {
@@ -8124,7 +8208,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
        struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 {
-
        u8 cos;
        int cxt_index, cxt_offset;
 
@@ -8133,7 +8216,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
                __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
                __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
 
-               /* If HC is supporterd, enable host coalescing in the transition
+               /* If HC is supported, enable host coalescing in the transition
                 * to INIT state.
                 */
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
@@ -8205,7 +8288,6 @@ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        return bnx2x_queue_state_change(bp, q_params);
 }
 
-
 /**
  * bnx2x_setup_queue - setup queue
  *
@@ -8254,7 +8336,6 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
        DP(NETIF_MSG_IFUP, "init complete\n");
 
-
        /* Now move the Queue to the SETUP state... */
        memset(setup_params, 0, sizeof(*setup_params));
 
@@ -8315,7 +8396,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
        /* We want to wait for completion in this context */
        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
-
        /* close tx-only connections */
        for (tx_index = FIRST_TX_ONLY_COS_INDEX;
             tx_index < fp->max_cos;
@@ -8369,7 +8449,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
        return bnx2x_queue_state_change(bp, &q_params);
 }
 
-
 static void bnx2x_reset_func(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
@@ -8422,7 +8501,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
                 * scan to complete
                 */
                for (i = 0; i < 200; i++) {
-                       msleep(10);
+                       usleep_range(10000, 20000);
                        if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
                                break;
                }
@@ -8623,14 +8702,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
 
        /*
         * (assumption: No Attention from MCP at this stage)
-        * PMF probably in the middle of TXdisable/enable transaction
+        * PMF probably in the middle of TX disable/enable transaction
         * 1. Sync IRS for default SB
-        * 2. Sync SP queue - this guarantes us that attention handling started
-        * 3. Wait, that TXdisable/enable transaction completes
+        * 2. Sync SP queue - this guarantees us that attention handling started
+        * 3. Wait, that TX disable/enable transaction completes
         *
-        * 1+2 guranty that if DCBx attention was scheduled it already changed
-        * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy
-        * received complettion for the transaction the state is TX_STOPPED.
+        * 1+2 guarantee that if DCBx attention was scheduled it already changed
+        * pending bit of transaction from STARTED-->TX_STOPPED; if we already
+        * received completion for the transaction the state is TX_STOPPED.
         * State will return to STARTED after completion of TX_STOPPED-->STARTED
         * transaction.
         */
@@ -8660,7 +8739,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
                struct bnx2x_func_state_params func_params = {NULL};
 
                DP(NETIF_MSG_IFDOWN,
-                  "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+                  "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
 
                func_params.f_obj = &bp->func_obj;
                __set_bit(RAMROD_DRV_CLR_ONLY,
@@ -8740,7 +8819,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        bnx2x_iov_chip_cleanup(bp);
 
-
        /*
         * Send the UNLOAD_REQUEST to the MCP. This will return if
         * this function should perform FUNC, PORT or COMMON HW
@@ -8750,7 +8828,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        /*
         * (assumption: No Attention from MCP at this stage)
-        * PMF probably in the middle of TXdisable/enable transaction
+        * PMF probably in the middle of TX disable/enable transaction
         */
        rc = bnx2x_func_wait_started(bp);
        if (rc) {
@@ -8813,7 +8891,6 @@ unload_error:
        if (rc)
                BNX2X_ERR("HW_RESET failed\n");
 
-
        /* Report UNLOAD_DONE to MCP */
        bnx2x_send_unload_done(bp, keep_link);
 }
@@ -9179,7 +9256,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
        if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
                return -EAGAIN;
 
-
        /* TBD: Indicate that "process kill" is in progress to MCP */
 
        /* Clear "unprepared" bit */
@@ -9367,7 +9443,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                                 * the first leader that performs a
                                 * leader_reset() reset the global blocks in
                                 * order to clear global attentions. Otherwise
-                                * the the gates will remain closed for that
+                                * the gates will remain closed for that
                                 * engine.
                                 */
                                if (load_status ||
@@ -9480,14 +9556,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                return;
        }
 
-       /* if stop on error is defined no recovery flows should be executed */
+       if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
 #ifdef BNX2X_STOP_ON_ERROR
-       BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
-                 "you will need to reboot when done\n");
-       goto sp_rtnl_not_reset;
+               BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+                         "you will need to reboot when done\n");
+               goto sp_rtnl_not_reset;
 #endif
-
-       if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
                /*
                 * Clear all pending SP commands as we are going to reset the
                 * function anyway.
@@ -9502,6 +9576,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
        }
 
        if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+#ifdef BNX2X_STOP_ON_ERROR
+               BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+                         "you will need to reboot when done\n");
+               goto sp_rtnl_not_reset;
+#endif
+
                /*
                 * Clear all pending SP commands as we are going to reset the
                 * function anyway.
@@ -9540,6 +9620,13 @@ sp_rtnl_not_reset:
                   "sending set mcast vf pf channel message from rtnl sp-task\n");
                bnx2x_vfpf_set_mcast(bp->dev);
        }
+       if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+                              &bp->sp_rtnl_state)){
+               if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
+                       bnx2x_tx_disable(bp);
+                       BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
+               }
+       }
 
        if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
                               &bp->sp_rtnl_state)) {
@@ -9647,7 +9734,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
                        REG_WR(bp, vals->bmac_addr, wb_data[0]);
                        REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
-
                }
                BNX2X_DEV_INFO("Disable emac Rx\n");
                vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
@@ -9681,7 +9767,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 
        if (mac_stopped)
                msleep(20);
-
 }
 
 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
@@ -9780,6 +9865,21 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
        return rc;
 }
 
+bool bnx2x_port_after_undi(struct bnx2x *bp)
+{
+       struct bnx2x_prev_path_list *entry;
+       bool val;
+
+       down(&bnx2x_prev_sem);
+
+       entry = bnx2x_prev_path_get_entry(bp);
+       val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
+
+       up(&bnx2x_prev_sem);
+
+       return val;
+}
+
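
The new bnx2x_port_after_undi() helper takes bnx2x_prev_sem around the existing path-list lookup and collapses the result to a bool, so the boot-from-SAN check later in this patch no longer dereferences struct bnx2x_prev_path_list itself. Roughly the same take-lock, snapshot, release shape in portable C, with a pthread mutex and a made-up entry type standing in for the driver's semaphore and list:

#include <pthread.h>
#include <stdbool.h>

struct path_entry {			/* hypothetical stand-in for the driver's list entry */
	unsigned int undi_ports;	/* one bit per port that went through UNDI */
};

static pthread_mutex_t prev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct path_entry current_entry = { .undi_ports = 0x1 };

static struct path_entry *lookup_entry(void)
{
	return &current_entry;		/* the driver walks a list here */
}

static bool port_after_undi(int port)
{
	struct path_entry *entry;
	bool val;

	pthread_mutex_lock(&prev_lock);
	entry = lookup_entry();
	val = entry && (entry->undi_ports & (1u << port));	/* snapshot under the lock */
	pthread_mutex_unlock(&prev_lock);

	return val;			/* safe to use after the lock is dropped */
}
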
 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
 {
        struct bnx2x_prev_path_list *tmp_list;
@@ -9839,7 +9939,6 @@ static int bnx2x_do_flr(struct bnx2x *bp)
        u16 status;
        struct pci_dev *dev = bp->pdev;
 
-
        if (CHIP_IS_E1x(bp)) {
                BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
                return -EINVAL;
@@ -9986,7 +10085,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 
                if (!timer_count)
                        BNX2X_ERR("Failed to empty BRB, hope for the best\n");
-
        }
 
        /* No packets are in the pipeline, path is ready for reset */
@@ -10036,7 +10134,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
 {
        int time_counter = 10;
        u32 rc, fw, hw_lock_reg, hw_lock_val;
-       struct bnx2x_prev_path_list *prev_list;
        BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
 
        /* clear hw from errors which may have resulted from an interrupted
@@ -10049,7 +10146,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
                      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
                      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
 
-       hw_lock_val = (REG_RD(bp, hw_lock_reg));
+       hw_lock_val = REG_RD(bp, hw_lock_reg);
        if (hw_lock_val) {
                if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
                        BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
@@ -10064,7 +10161,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
 
        if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
                BNX2X_DEV_INFO("Release previously held alr\n");
-               REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+               bnx2x_release_alr(bp);
        }
 
        do {
@@ -10093,7 +10190,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
                        break;
                }
 
-               /* non-common reply from MCP night require looping */
+               /* non-common reply from MCP might require looping */
                rc = bnx2x_prev_unload_uncommon(bp);
                if (rc != BNX2X_PREV_WAIT_NEEDED)
                        break;
@@ -10107,8 +10204,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
        }
 
        /* Mark function if its port was used to boot from SAN */
-       prev_list = bnx2x_prev_path_get_entry(bp);
-       if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
+       if (bnx2x_port_after_undi(bp))
                bp->link_params.feature_config_flags |=
                        FEATURE_CONFIG_BOOT_FROM_SAN;
 
@@ -10192,8 +10288,6 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
 
        bnx2x_init_shmem(bp);
 
-
-
        bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
                                        MISC_REG_GENERIC_CR_1 :
                                        MISC_REG_GENERIC_CR_0));
@@ -10455,6 +10549,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
                        bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
 
+               if (!(bp->link_params.speed_cap_mask[idx] &
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+                       bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
        }
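
This hunk extends the existing capability pruning one step: just as 10G support is dropped when the D0_10G capability bit is absent, 20000baseKR2 is now dropped when the D0_20G bit is absent. The rule is a conditional bit clear; the constants below are placeholders, not the driver's PORT_HW_CFG_/SUPPORTED_ values:

#include <stdint.h>

#define CAP_20G		(1u << 7)	/* placeholder for PORT_HW_CFG_SPEED_CAPABILITY_D0_20G */
#define SUP_20000_KR2	(1u << 11)	/* placeholder for SUPPORTED_20000baseKR2_Full */

/* Clear an advertised mode when the capability mask says the speed is absent. */
static uint32_t filter_supported(uint32_t supported, uint32_t speed_cap_mask)
{
	if (!(speed_cap_mask & CAP_20G))
		supported &= ~SUP_20000_KR2;
	return supported;
}
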
 
        BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
@@ -10765,7 +10862,6 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
         */
        if (!bp->cnic_eth_dev.max_iscsi_conn)
                bp->flags |= no_flags;
-
 }
 
 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
@@ -10782,12 +10878,56 @@ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
        bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
                MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
 }
+
+static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
+{
+       u8 count = 0;
+
+       if (IS_MF(bp)) {
+               u8 fid;
+
+               /* iterate over absolute function ids for this path: */
+               for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
+                       if (IS_MF_SD(bp)) {
+                               u32 cfg = MF_CFG_RD(bp,
+                                                   func_mf_config[fid].config);
+
+                               if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
+                                   ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
+                                           FUNC_MF_CFG_PROTOCOL_FCOE))
+                                       count++;
+                       } else {
+                               u32 cfg = MF_CFG_RD(bp,
+                                                   func_ext_config[fid].
+                                                                     func_cfg);
+
+                               if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
+                                   (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+                                       count++;
+                       }
+               }
+       } else { /* SF */
+               int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
+
+               for (port = 0; port < port_cnt; port++) {
+                       u32 lic = SHMEM_RD(bp,
+                                          drv_lic_key[port].max_fcoe_conn) ^
+                                 FW_ENCODE_32BIT_PATTERN;
+                       if (lic)
+                               count++;
+               }
+       }
+
+       return count;
+}
+
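
bnx2x_shared_fcoe_funcs() counts how many functions on this engine actually run FCoE (per-function MF configuration in multi-function mode, per-port license words otherwise); bnx2x_get_fcoe_info() below then divides the per-engine exchange budget by that count instead of the fixed MAX_FCOE_FUNCS_PER_ENGINE, so a lone FCoE function keeps the whole budget. The sizing rule itself reduces to a guarded division; the budget value here is illustrative only:

#include <stdio.h>

#define FCOE_TASKS_PER_ENGINE	4096U	/* illustrative budget, not the driver's constant */

/* Split the per-engine FCoE exchange budget between the functions that use it. */
static unsigned int fcoe_exchanges(unsigned int num_fcoe_func)
{
	unsigned int max_exchanges = FCOE_TASKS_PER_ENGINE;

	if (num_fcoe_func)
		max_exchanges /= num_fcoe_func;	/* zero users leaves the budget untouched */
	return max_exchanges;
}

int main(void)
{
	printf("%u %u\n", fcoe_exchanges(1), fcoe_exchanges(2));	/* 4096 2048 */
	return 0;
}
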
 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
        int func = BP_ABS_FUNC(bp);
        u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
                                drv_lic_key[port].max_fcoe_conn);
+       u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
 
        if (!CNIC_SUPPORT(bp)) {
                bp->flags |= NO_FCOE_FLAG;
@@ -10801,9 +10941,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
 
        /* Calculate the number of maximum allowed FCoE tasks */
        bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
-       if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
-               bp->cnic_eth_dev.max_fcoe_exchanges /=
-                                               MAX_FCOE_FUNCS_PER_ENGINE;
+
+       /* check if FCoE resources must be shared between different functions */
+       if (num_fcoe_func)
+               bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
 
        /* Read the WWN: */
        if (!IS_MF(bp)) {
@@ -11031,7 +11172,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
        } else {
                bp->common.int_block = INT_BLOCK_IGU;
 
-               /* do not allow device reset during IGU info preocessing */
+               /* do not allow device reset during IGU info processing */
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
                val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -11110,7 +11251,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
                                E1H_FUNC_MAX * sizeof(struct drv_func_mb);
                /*
                 * get mf configuration:
-                * 1. existence of MF configuration
+                * 1. Existence of MF configuration
                 * 2. MAC address must be legal (check only upper bytes)
                 *    for  Switch-Independent mode;
                 *    OVLAN must be legal for Switch-Dependent mode
@@ -11384,7 +11525,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->fw_mb_mutex);
        spin_lock_init(&bp->stats_lock);
 
-
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
        INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
@@ -11393,7 +11533,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
                if (rc)
                        return rc;
        } else {
-               random_ether_addr(bp->dev->dev_addr);
+               eth_zero_addr(bp->dev->dev_addr);
        }
 
        bnx2x_set_modes_bitmap(bp);
@@ -11417,7 +11557,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
                bnx2x_prev_unload(bp);
        }
 
-
        if (CHIP_REV_IS_FPGA(bp))
                dev_err(&bp->pdev->dev, "FPGA detected\n");
 
@@ -11489,7 +11628,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        /* We need at least one default status block for slow-path events,
         * second status block for the L2 queue, and a third status block for
-        * CNIC if supproted.
+        * CNIC if supported.
         */
        if (CNIC_SUPPORT(bp))
                bp->min_msix_vec_cnt = 3;
@@ -11500,7 +11639,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        return rc;
 }
 
-
 /****************************************************************************
 * General service functions
 ****************************************************************************/
@@ -11585,9 +11723,6 @@ static int bnx2x_close(struct net_device *dev)
        /* Unload the driver, release IRQs */
        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 
-       /* Power off */
-       bnx2x_set_power_state(bp, PCI_D3hot);
-
        return 0;
 }
 
@@ -11852,6 +11987,10 @@ static int bnx2x_validate_addr(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
+       /* query the bulletin board for mac address configured by the PF */
+       if (IS_VF(bp))
+               bnx2x_sample_bulletin(bp);
+
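
This pairs with the eth_zero_addr() change earlier in the patch: a VF now starts with an all-zero MAC rather than a random one, and re-samples the PF's bulletin board immediately before validation, so an address assigned late by the PF is picked up and a still-unassigned one remains detectable. A toy model of that refresh-then-check ordering, with made-up helpers and the validity test reduced to a non-zero check:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* Hypothetical stand-ins: the MAC the PF last published and the VF's local copy. */
static unsigned char published_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
static unsigned char dev_addr[ETH_ALEN];	/* starts all-zero, like eth_zero_addr() */

static bool addr_assigned(const unsigned char *mac)
{
	static const unsigned char zero[ETH_ALEN];

	return memcmp(mac, zero, ETH_ALEN) != 0;	/* all-zero means "not assigned yet" */
}

static int validate_addr(bool is_vf)
{
	if (is_vf)
		memcpy(dev_addr, published_mac, ETH_ALEN);	/* re-sample the bulletin first */

	return addr_assigned(dev_addr) ? 0 : -1;
}
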
        if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
                BNX2X_ERR("Non-valid Ethernet address\n");
                return -EADDRNOTAVAIL;
@@ -11878,12 +12017,16 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_setup_tc           = bnx2x_setup_tc,
 #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
-       .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
+       .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
        .ndo_get_vf_config      = bnx2x_get_vf_config,
 #endif
 #ifdef NETDEV_FCOE_WWNN
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
 #endif
+
+#ifdef CONFIG_NET_LL_RX_POLL
+       .ndo_ll_poll            = bnx2x_low_latency_recv,
+#endif
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -11959,7 +12102,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        }
 
        if (IS_PF(bp)) {
-               bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+               bp->pm_cap = pdev->pm_cap;
                if (bp->pm_cap == 0) {
                        dev_err(&bp->pdev->dev,
                                "Cannot find power management capability, aborting\n");
@@ -12008,8 +12151,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        }
        BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
 
-       bnx2x_set_power_state(bp, PCI_D0);
-
        /* clean indirect addresses */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
@@ -12094,15 +12235,26 @@ err_out:
        return rc;
 }
 
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
+static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
+                                      enum bnx2x_pci_bus_speed *speed)
 {
-       u32 val = 0;
+       u32 link_speed, val = 0;
 
        pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
        *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
 
-       /* return value of 1=2.5GHz 2=5GHz */
-       *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+       link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+
+       switch (link_speed) {
+       case 3:
+               *speed = BNX2X_PCI_LINK_SPEED_8000;
+               break;
+       case 2:
+               *speed = BNX2X_PCI_LINK_SPEED_5000;
+               break;
+       default:
+               *speed = BNX2X_PCI_LINK_SPEED_2500;
+       }
 }
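
bnx2x_get_pcie_width_speed() now returns a named bnx2x_pci_bus_speed value instead of the raw 1/2/3 encoding read from PCICFG_LINK_SPEED, with any unrecognized encoding treated as 2.5 GT/s; the probe banner later in the patch keys its 2.5/5.0/8.0 GHz string off that enum. A compact model of the decode (the enum is assumed to mirror the driver's):

/* Assumed to mirror enum bnx2x_pci_bus_speed from the driver headers. */
enum pcie_speed {
	PCIE_SPEED_2500 = 2500,
	PCIE_SPEED_5000 = 5000,
	PCIE_SPEED_8000 = 8000,
};

static enum pcie_speed decode_link_speed(unsigned int raw)
{
	switch (raw) {
	case 3:
		return PCIE_SPEED_8000;		/* Gen3 */
	case 2:
		return PCIE_SPEED_5000;		/* Gen2 */
	default:
		return PCIE_SPEED_2500;		/* Gen1, or any unexpected encoding */
	}
}
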
 
 static int bnx2x_check_firmware(struct bnx2x *bp)
@@ -12327,7 +12479,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
        bp->firmware = NULL;
 }
 
-
 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
        .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
        .init_hw_cmn      = bnx2x_init_hw_common,
@@ -12465,7 +12616,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 {
        struct net_device *dev = NULL;
        struct bnx2x *bp;
-       int pcie_width, pcie_speed;
+       int pcie_width;
+       enum bnx2x_pci_bus_speed pcie_speed;
        int rc, max_non_def_sbs;
        int rx_count, tx_count, rss_count, doorbell_size;
        int max_cos_est;
@@ -12605,7 +12757,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        }
        BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 
-
        if (!NO_FCOE(bp)) {
                /* Add storage MAC address */
                rtnl_lock();
@@ -12617,15 +12768,15 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
                       pcie_width, pcie_speed);
 
-       BNX2X_DEV_INFO(
-               "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
-                   board_info[ent->driver_data].name,
-                   (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
-                   pcie_width,
-                   ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
-                    (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
-                   "5GHz (Gen2)" : "2.5GHz",
-                   dev->base_addr, bp->pdev->irq, dev->dev_addr);
+       BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+                      board_info[ent->driver_data].name,
+                      (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+                      pcie_width,
+                      pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
+                      pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
+                      pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+                      "Unknown",
+                      dev->base_addr, bp->pdev->irq, dev->dev_addr);
 
        return 0;
 
@@ -12647,17 +12798,11 @@ init_one_exit:
        return rc;
 }
 
-static void bnx2x_remove_one(struct pci_dev *pdev)
+static void __bnx2x_remove(struct pci_dev *pdev,
+                          struct net_device *dev,
+                          struct bnx2x *bp,
+                          bool remove_netdev)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct bnx2x *bp;
-
-       if (!dev) {
-               dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
-               return;
-       }
-       bp = netdev_priv(dev);
-
        /* Delete storage MAC address */
        if (!NO_FCOE(bp)) {
                rtnl_lock();
@@ -12670,7 +12815,17 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
        bnx2x_dcbnl_update_applist(bp, true);
 #endif
 
-       unregister_netdev(dev);
+       /* Close the interface - either directly or implicitly */
+       if (remove_netdev) {
+               unregister_netdev(dev);
+       } else {
+               rtnl_lock();
+               if (netif_running(dev))
+                       bnx2x_close(dev);
+               rtnl_unlock();
+       }
+
+       bnx2x_iov_remove_one(bp);
 
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        if (IS_PF(bp))
@@ -12686,12 +12841,16 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
        /* Make sure RESET task is not scheduled before continuing */
        cancel_delayed_work_sync(&bp->sp_rtnl_task);
 
-       bnx2x_iov_remove_one(bp);
-
        /* send message via vfpf channel to release the resources of this vf */
        if (IS_VF(bp))
                bnx2x_vfpf_release(bp);
 
+       /* Assumes no further PCIe PM changes will occur */
+       if (system_state == SYSTEM_POWER_OFF) {
+               pci_wake_from_d3(pdev, bp->wol);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+
        if (bp->regview)
                iounmap(bp->regview);
 
@@ -12706,7 +12865,8 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
        }
        bnx2x_free_mem_bp(bp);
 
-       free_netdev(dev);
+       if (remove_netdev)
+               free_netdev(dev);
 
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);
@@ -12715,6 +12875,20 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
+static void bnx2x_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2x *bp;
+
+       if (!dev) {
+               dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+               return;
+       }
+       bp = netdev_priv(dev);
+
+       __bnx2x_remove(pdev, dev, bp, true);
+}
+
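
bnx2x_remove_one() becomes a thin wrapper around __bnx2x_remove(), which now carries the shared teardown and a remove_netdev flag: the PCI .remove path still unregisters and frees the netdevice, while the .shutdown path added further down only closes it, since removing it can hang a system whose rootfs is mounted from SAN through this device. The split reduces to this shape, with hypothetical stubs standing in for the driver calls:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's close/unregister/free steps. */
static void quiesce_device(void)    { puts("close interface, keep netdev"); }
static void unregister_device(void) { puts("unregister and free netdev"); }
static void release_common(void)    { puts("unmap BARs, free memory, disable PCI"); }

static void do_remove(bool remove_netdev)
{
	if (remove_netdev)
		unregister_device();	/* .remove: the netdevice goes away */
	else
		quiesce_device();	/* .shutdown: just stop traffic */

	release_common();		/* everything else is shared by both paths */
}

int main(void)
{
	do_remove(true);		/* PCI remove */
	do_remove(false);		/* system shutdown */
	return 0;
}
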
 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 {
        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
@@ -12747,19 +12921,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        return 0;
 }
 
-static void bnx2x_eeh_recover(struct bnx2x *bp)
-{
-       u32 val;
-
-       mutex_init(&bp->port.phy_mutex);
-
-
-       val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-       if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-               != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-               BNX2X_ERR("BAD MCP validity signature\n");
-}
-
 /**
  * bnx2x_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -12828,6 +12989,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 
        if (netif_running(dev)) {
                BNX2X_ERR("IO slot reset --> driver unload\n");
+
+               /* MCP should have been reset; Need to wait for validity */
+               bnx2x_init_shmem(bp);
+
                if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                        u32 v;
 
@@ -12849,7 +13014,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 
                bnx2x_prev_unload(bp);
 
-               /* We should have resetted the engine, so It's fair to
+               /* We should have reset the engine, so it's fair to
                 * assume the FW will no longer write to the bnx2x driver.
                 */
                bnx2x_squeeze_objects(bp);
@@ -12886,8 +13051,6 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
 
        rtnl_lock();
 
-       bnx2x_eeh_recover(bp);
-
        bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
                                                        DRV_MSG_SEQ_NUMBER_MASK;
 
@@ -12905,6 +13068,29 @@ static const struct pci_error_handlers bnx2x_err_handler = {
        .resume         = bnx2x_io_resume,
 };
 
+static void bnx2x_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2x *bp;
+
+       if (!dev)
+               return;
+
+       bp = netdev_priv(dev);
+       if (!bp)
+               return;
+
+       rtnl_lock();
+       netif_device_detach(dev);
+       rtnl_unlock();
+
+       /* Don't remove the netdevice, as there are scenarios which will cause
+        * the kernel to hang, e.g., when trying to remove bnx2i while the
+        * rootfs is mounted from SAN.
+        */
+       __bnx2x_remove(pdev, dev, bp, false);
+}
+
 static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
@@ -12916,6 +13102,7 @@ static struct pci_driver bnx2x_pci_driver = {
 #ifdef CONFIG_BNX2X_SRIOV
        .sriov_configure = bnx2x_sriov_configure,
 #endif
+       .shutdown    = bnx2x_shutdown,
 };
 
 static int __init bnx2x_init(void)
@@ -12941,11 +13128,12 @@ static int __init bnx2x_init(void)
 static void __exit bnx2x_cleanup(void)
 {
        struct list_head *pos, *q;
+
        pci_unregister_driver(&bnx2x_pci_driver);
 
        destroy_workqueue(bnx2x_wq);
 
-       /* Free globablly allocated resources */
+       /* Free globally allocated resources */
        list_for_each_safe(pos, q, &bnx2x_prev_list) {
                struct bnx2x_prev_path_list *tmp =
                        list_entry(pos, struct bnx2x_prev_path_list, list);
@@ -12968,7 +13156,7 @@ module_exit(bnx2x_cleanup);
  * @bp:                driver handle
  * @set:       set or clear the CAM entry
  *
- * This function will wait until the ramdord completion returns.
+ * This function will wait until the ramrod completion returns.
  * Return 0 if success, -ENODEV if ramrod doesn't return.
  */
 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
@@ -12996,7 +13184,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
        BUG_ON(bp->cnic_spq_pending < count);
        bp->cnic_spq_pending -= count;
 
-
        for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
                u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
                                & SPE_HDR_CONN_TYPE) >>
@@ -13169,7 +13356,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
        bnx2x_cnic_sp_post(bp, 0);
 }
 
-
 /* Called with netif_addr_lock_bh() taken.
  * Sets an rx_mode config for an iSCSI ETH client.
  * Doesn't block.
@@ -13210,7 +13396,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
        }
 }
 
-
 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -13398,7 +13583,6 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
 {
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
-
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
@@ -13434,7 +13618,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                        BNX2X_ERR("CNIC-related load failed\n");
                        return rc;
                }
-
        }
 
        bp->cnic_enabled = true;
index d22bc40091ec47738a0132dd549c095645f90702..8e627b886d7b1b401676b3e10f0c6cb573c3e78d 100644 (file)
@@ -35,6 +35,8 @@
 #define ATC_REG_ATC_INT_STS_CLR                                         0x1101c0
 /* [RW 5] Parity mask register #0 read/write */
 #define ATC_REG_ATC_PRTY_MASK                                   0x1101d8
+/* [R 5] Parity register #0 read */
+#define ATC_REG_ATC_PRTY_STS                                    0x1101cc
 /* [RC 5] Parity register #0 read clear */
 #define ATC_REG_ATC_PRTY_STS_CLR                                0x1101d0
 /* [RW 19] Interrupt mask register #0 read/write */
 #define PBF_REG_PBF_INT_STS                                     0x1401c8
 /* [RW 20] Parity mask register #0 read/write */
 #define PBF_REG_PBF_PRTY_MASK                                   0x1401e4
+/* [R 28] Parity register #0 read */
+#define PBF_REG_PBF_PRTY_STS                                    0x1401d8
 /* [RC 20] Parity register #0 read clear */
 #define PBF_REG_PBF_PRTY_STS_CLR                                0x1401dc
 /* [RW 16] The Ethernet type value for L2 tag 0 */
 #define TM_REG_TM_INT_STS                                       0x1640f0
 /* [RW 7] Parity mask register #0 read/write */
 #define TM_REG_TM_PRTY_MASK                                     0x16410c
+/* [R 7] Parity register #0 read */
+#define TM_REG_TM_PRTY_STS                                      0x164100
 /* [RC 7] Parity register #0 read clear */
 #define TM_REG_TM_PRTY_STS_CLR                                  0x164104
 /* [RW 8] The event id for aggregated interrupt 0 */
index 32a9609cc98bcd36e56b0193bba3a3508424f5cb..8f03c984550f328c88764d83f04813b344445b1d 100644 (file)
@@ -35,9 +35,9 @@
 /**
  * bnx2x_exe_queue_init - init the Exe Queue object
  *
- * @o:         poiter to the object
+ * @o:         pointer to the object
  * @exe_len:   length
- * @owner:     poiter to the owner
+ * @owner:     pointer to the owner
  * @validate:  validate function pointer
  * @optimize:  optimize function pointer
  * @exec:      execute function pointer
@@ -142,7 +142,6 @@ free_and_exit:
        spin_unlock_bh(&o->lock);
 
        return rc;
-
 }
 
 static inline void __bnx2x_exe_queue_reset_pending(
@@ -163,13 +162,11 @@ static inline void __bnx2x_exe_queue_reset_pending(
 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
                                                 struct bnx2x_exe_queue_obj *o)
 {
-
        spin_lock_bh(&o->lock);
 
        __bnx2x_exe_queue_reset_pending(bp, o);
 
        spin_unlock_bh(&o->lock);
-
 }
 
 /**
@@ -179,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
  * @o:                 queue
  * @ramrod_flags:      flags
  *
- * (Atomicy is ensured using the exe_queue->lock).
+ * (Atomicity is ensured using the exe_queue->lock).
  */
 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
@@ -192,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 
        spin_lock_bh(&o->lock);
 
-       /*
-        * Next step should not be performed until the current is finished,
+       /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
         * which also implies there won't be any completion to clear the
@@ -209,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                }
        }
 
-       /*
-        * Run through the pending commands list and create a next
+       /* Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!list_empty(&o->exe_queue)) {
@@ -220,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 
                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
-                       /*
-                        * Prevent from both lists being empty when moving an
+                       /* Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * bnx2x_exe_queue_empty() without locking.
                         */
@@ -241,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 
        rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
-               /*
-                *  In case of an error return the commands back to the queue
-                *  and reset the pending_comp.
+               /* In case of an error return the commands back to the queue
+                * and reset the pending_comp.
                 */
                list_splice_init(&o->pending_comp, &o->exe_queue);
        else if (!rc)
-               /*
-                * If zero is returned, means there are no outstanding pending
+               /* If zero is returned, means there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __bnx2x_exe_queue_reset_pending(bp, o);
@@ -308,7 +300,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
        /* can take a while if any port is running */
        int cnt = 5000;
 
-
        if (CHIP_REV_IS_EMUL(bp))
                cnt *= 20;
 
@@ -456,7 +447,6 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                        DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
                           counter, next);
                        next += stride + size;
-
                }
        }
        return counter * ETH_ALEN;
@@ -518,7 +508,6 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
        return 0;
 }
 
-
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x *bp,
@@ -609,7 +598,6 @@ static bool bnx2x_check_move_always_err(
        return false;
 }
 
-
 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
 {
        struct bnx2x_raw_obj *raw = &o->raw;
@@ -626,7 +614,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
        return rx_tx_flag;
 }
 
-
 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                          bool add, unsigned char *dev_addr, int index)
 {
@@ -693,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
  *
  * @cid:       connection id
  * @type:      BNX2X_FILTER_XXX_PENDING
- * @hdr:       poiter to header to setup
+ * @hdr:       pointer to header to setup
  * @rule_cnt:
  *
  * currently we always configure one rule and echo field to contain a CID and an
@@ -707,7 +694,6 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
        hdr->rule_cnt = (u8)rule_cnt;
 }
 
-
 /* hw_config() callbacks */
 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
@@ -723,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
 
-       /*
-        * Set LLH CAM entry: currently only iSCSI and ETH macs are
+       /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
@@ -879,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
-       /*
-        * 57710 and 57711 do not support MOVE command,
+       /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@@ -960,7 +944,6 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
        u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
 
-
        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));
@@ -969,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
                                      &rule_entry->pair.header);
 
-       /* Set VLAN and MAC themselvs */
+       /* Set VLAN and MAC themselves */
        rule_entry->pair.vlan = cpu_to_le16(vlan);
        bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                              &rule_entry->pair.mac_mid,
@@ -1021,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
-       /*
-        * 57710 and 57711 do not support MOVE command,
+       /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
@@ -1046,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
  *
  * @bp:                device handle
  * @p:         command parameters
- * @ppos:      pointer to the cooky
+ * @ppos:      pointer to the cookie
  *
  * reconfigure next MAC/VLAN/VLAN-MAC element from the
  * previously configured elements list.
@@ -1054,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is        taken
  * into an account
  *
- * pointer to the cooky  - that should be given back in the next call to make
+ * pointer to the cookie  - that should be given back in the next call to make
  * function handle the next element. If *ppos is set to NULL it will restart the
  * iterator. If returned *ppos == NULL this means that the last element has been
  * handled.
@@ -1102,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
        return bnx2x_config_vlan_mac(bp, p);
 }
 
-/*
- * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
  * pointer to an element with a specific criteria and NULL if such an element
  * hasn't been found.
  */
@@ -1187,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                return rc;
        }
 
-       /*
-        * Check if there is a pending ADD command for this
+       /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
@@ -1196,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                return -EEXIST;
        }
 
-       /*
-        * TODO: Check the pending MOVE from other objects where this
+       /* TODO: Check the pending MOVE from other objects where this
         * object is a destination object.
         */
 
@@ -1240,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
                return -EEXIST;
        }
 
-       /*
-        * Check if there are pending DEL or MOVE commands for this
+       /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));
@@ -1292,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
 
-       /*
-        * Check if we can perform this operation based on the current registry
+       /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(bp, src_o, dest_o,
@@ -1302,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
                return -EINVAL;
        }
 
-       /*
-        * Check if there is an already pending DEL or MOVE command for the
+       /* Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
@@ -1392,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
 }
 
 /**
- * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
  *
  * @bp:                device handle
  * @o:         bnx2x_vlan_mac_obj
@@ -1550,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
 
                /* Get a new CAM offset */
                if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
-                       /*
-                        * This shell never happen, because we have checked the
-                        * CAM availiability in the 'validate'.
+                       /* This shall never happen, because we have checked the
+                        * CAM availability in the 'validate'.
                         */
                        WARN_ON(1);
                        kfree(reg_elem);
@@ -1599,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
        struct bnx2x_vlan_mac_registry_elem *reg_elem;
        enum bnx2x_vlan_mac_cmd cmd;
 
-       /*
-        * If DRIVER_ONLY execution is requested, cleanup a registry
+       /* If DRIVER_ONLY execution is requested, cleanup a registry
         * and exit. Otherwise send a ramrod to FW.
         */
        if (!drv_only) {
@@ -1609,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
                /* Set pending */
                r->set_pending(r);
 
-               /* Fill tha ramrod data */
+               /* Fill the ramrod data */
                list_for_each_entry(elem, exe_chunk, link) {
                        cmd = elem->cmd_data.vlan_mac.cmd;
-                       /*
-                        * We will add to the target object in MOVE command, so
+                       /* We will add to the target object in MOVE command, so
                         * change the object for a CAM search.
                         */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
@@ -1646,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
                                idx++;
                }
 
-               /*
-                *  No need for an explicit memory barrier here as long we would
-                *  need to ensure the ordering of writing to the SPQ element
-                *  and updating of the SPQ producer which involves a memory
-                *  read and we will have to put a full memory barrier there
-                *  (inside bnx2x_sp_post()).
+               /* No need for an explicit memory barrier here as long we would
+                * need to ensure the ordering of writing to the SPQ element
+                * and updating of the SPQ producer which involves a memory
+                * read and we will have to put a full memory barrier there
+                * (inside bnx2x_sp_post()).
                 */
 
                rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
@@ -1766,8 +1738,7 @@ int bnx2x_config_vlan_mac(
                        return rc;
        }
 
-       /*
-        * If nothing will be executed further in this iteration we want to
+       /* If nothing will be executed further in this iteration we want to
         * return PENDING if there are pending commands
         */
        if (!bnx2x_exe_queue_empty(&o->exe_queue))
@@ -1786,13 +1757,11 @@ int bnx2x_config_vlan_mac(
                        return rc;
        }
 
-       /*
-        * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+       /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
         * then user want to wait until the last command is done.
         */
        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
-               /*
-                * Wait maximum for the current exe_queue length iterations plus
+               /* Wait maximum for the current exe_queue length iterations plus
                 * one (for the current pending command).
                 */
                int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
@@ -1818,8 +1787,6 @@ int bnx2x_config_vlan_mac(
        return rc;
 }
 
-
-
 /**
  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
  *
@@ -1829,7 +1796,7 @@ int bnx2x_config_vlan_mac(
  * @ramrod_flags:      execution flags to be used for this deletion
  *
  * if the last operation has completed successfully and there are no
- * moreelements left, positive value if the last operation has completed
+ * more elements left, positive value if the last operation has completed
  * successfully and there are more previously configured elements, negative
  * value is current operation has failed.
  */
@@ -1870,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
        p.ramrod_flags = *ramrod_flags;
        p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 
-       /*
-        * Add all but the last VLAN-MAC to the execution queue without actually
+       /* Add all but the last VLAN-MAC to the execution queue without actually
         * execution anything.
         */
        __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
@@ -1934,7 +1900,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
                           state, pstate, type);
 }
 
-
 void bnx2x_init_mac_obj(struct bnx2x *bp,
                        struct bnx2x_vlan_mac_obj *mac_obj,
                        u8 cl_id, u32 cid, u8 func_id, void *rdata,
@@ -2048,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
        /* CAM pool handling */
        vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
        vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
-       /*
-        * CAM offset is relevant for 57710 and 57711 chips only which have a
+       /* CAM offset is relevant for 57710 and 57711 chips only which have a
         * single CAM for both MACs and VLAN-MAC pairs. So the offset
         * will be taken from MACs' pool object only.
         */
@@ -2092,7 +2056,6 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_vlan_mac);
        }
-
 }
 
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
@@ -2117,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
        struct tstorm_eth_mac_filter_config *mac_filters =
                (struct tstorm_eth_mac_filter_config *)p->rdata;
 
-       /* initial seeting is drop-all */
+       /* initial setting is drop-all */
        u8 drop_all_ucast = 1, drop_all_mcast = 1;
        u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
        u8 unmatched_unicast = 0;
 
-    /* In e1x there we only take into account rx acceot flag since tx switching
+    /* In e1x we only take into account the rx accept flag since tx switching
      * isn't enabled. */
        if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
                /* accept matched ucast */
@@ -2245,7 +2208,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
        }
 
        cmd->state = cpu_to_le16(state);
-
 }
 
 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
@@ -2286,9 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
                                               false);
        }
 
-
-       /*
-        * If FCoE Queue configuration has been requested configure the Rx and
+       /* If FCoE Queue configuration has been requested configure the Rx and
         * internal switching modes for this queue in separate rules.
         *
         * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
@@ -2324,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
                }
        }
 
-       /*
-        * Set the ramrod header (most importantly - number of rules to
+       /* Set the ramrod header (most importantly - number of rules to
         * configure).
         */
        bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
@@ -2334,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
                         data->header.rule_cnt, p->rx_accept_flags,
                         p->tx_accept_flags);
 
-       /*
-        *  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long we would
+        * need to ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read and we will have to put a full memory barrier there
+        * (inside bnx2x_sp_post()).
         */
 
        /* Send a ramrod */
@@ -2476,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
                cur_mac = (struct bnx2x_mcast_mac_elem *)
                          ((u8 *)new_cmd + sizeof(*new_cmd));
 
-               /* Push the MACs of the current command into the pendig command
+               /* Push the MACs of the current command into the pending command
                 * MACs list: FIFO
                 */
                list_for_each_entry(pos, &p->mcast_list, link) {
@@ -2909,7 +2867,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
        default:
                BNX2X_ERR("Unknown command: %d\n", cmd);
                return -EINVAL;
-
        }
 
        /* Increase the total number of MACs pending to be configured */
@@ -3034,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
        if (!o->total_pending_num)
                bnx2x_mcast_refresh_registry_e2(bp, o);
 
-       /*
-        * If CLEAR_ONLY was requested - don't send a ramrod and clear
+       /* If CLEAR_ONLY was requested - don't send a ramrod and clear
         * RAMROD_PENDING status immediately.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
                raw->clear_pending(raw);
                return 0;
        } else {
-               /*
-                *  No need for an explicit memory barrier here as long we would
-                *  need to ensure the ordering of writing to the SPQ element
-                *  and updating of the SPQ producer which involves a memory
-                *  read and we will have to put a full memory barrier there
-                *  (inside bnx2x_sp_post()).
+               /* No need for an explicit memory barrier here as long we would
+                * need to ensure the ordering of writing to the SPQ element
+                * and updating of the SPQ producer which involves a memory
+                * read and we will have to put a full memory barrier there
+                * (inside bnx2x_sp_post()).
                 */
 
                /* Send a ramrod */
@@ -3121,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
        }
 }
 
-/* On 57711 we write the multicast MACs' aproximate match
+/* On 57711 we write the multicast MACs' approximate match
  * table by directly into the TSTORM's internal RAM. So we don't
  * really need to handle any tricks to make it work.
  */
@@ -3223,7 +3178,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
        default:
                BNX2X_ERR("Unknown command: %d\n", cmd);
                return -EINVAL;
-
        }
 
        /* We want to ensure that commands are executed one by one for 57710.
@@ -3245,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
 
        /* If current command hasn't been handled yet and we are
         * here means that it's meant to be dropped and we have to
-        * update the number of outstandling MACs accordingly.
+        * update the number of outstanding MACs accordingly.
         */
        if (p->mcast_list_len)
                o->total_pending_num -= o->max_cmd_len;
@@ -3342,7 +3296,6 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
        return -1;
 }
 
-
 static inline int bnx2x_mcast_handle_pending_cmds_e1(
        struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
 {
@@ -3352,7 +3305,6 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
        union bnx2x_mcast_config_data cfg_data = {NULL};
        int cnt = 0;
 
-
        /* If nothing to be done - return */
        if (list_empty(&o->pending_cmds_head))
                return 0;
@@ -3523,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
        if (rc)
                return rc;
 
-       /*
-        * If CLEAR_ONLY was requested - don't send a ramrod and clear
+       /* If CLEAR_ONLY was requested - don't send a ramrod and clear
         * RAMROD_PENDING status immediately.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
                raw->clear_pending(raw);
                return 0;
        } else {
-               /*
-                *  No need for an explicit memory barrier here as long we would
-                *  need to ensure the ordering of writing to the SPQ element
-                *  and updating of the SPQ producer which involves a memory
-                *  read and we will have to put a full memory barrier there
-                *  (inside bnx2x_sp_post()).
+               /* No need for an explicit memory barrier here as long we would
+                * need to ensure the ordering of writing to the SPQ element
+                * and updating of the SPQ producer which involves a memory
+                * read and we will have to put a full memory barrier there
+                * (inside bnx2x_sp_post()).
                 */
 
                /* Send a ramrod */
@@ -3550,7 +3500,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
                /* Ramrod completion is pending */
                return 1;
        }
-
 }
 
 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
@@ -3848,7 +3797,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
        return true;
 }
 
-
 static bool bnx2x_credit_pool_get_entry(
        struct bnx2x_credit_pool_obj *o,
        int *offset)
@@ -3999,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
 
        } else {
 
-               /*
-                * CAM credit is equaly divided between all active functions
+               /* CAM credit is equally divided between all active functions
                 * on the PATH.
                 */
                if ((func_num > 0)) {
@@ -4009,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
                        else
                                cam_sz = BNX2X_CAM_SIZE_EMUL;
 
-                       /*
-                        * No need for CAM entries handling for 57712 and
+                       /* No need for CAM entries handling for 57712 and
                         * newer.
                         */
                        bnx2x_init_credit_pool(p, -1, cam_sz);
@@ -4018,7 +3964,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
                        /* this should never happen! Block MAC operations. */
                        bnx2x_init_credit_pool(p, 0, 0);
                }
-
        }
 }
 
@@ -4028,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
                                 u8 func_num)
 {
        if (CHIP_IS_E1x(bp)) {
-               /*
-                * There is no VLAN credit in HW on 57710 and 57711 only
+               /* There is no VLAN credit in HW on 57710 and 57711, only
                 * MAC / MAC-VLAN can be set
                 */
                bnx2x_init_credit_pool(p, 0, -1);
        } else {
-               /*
-                * CAM credit is equaly divided between all active functions
+               /* CAM credit is equally divided between all active functions
                 * on the PATH.
                 */
                if (func_num > 0) {
@@ -4051,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
 /**
  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
  *
- * @bp:                driver hanlde
+ * @bp:                driver handle
  * @p:         pointer to rss configuration
  *
  * Prints it when NETIF_MSG_IFUP debug level is configured.
@@ -4164,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
                data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
        }
 
-       /*
-        *  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we would
+        * need to ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read and we will have to put a full memory barrier there
+        * (inside bnx2x_sp_post()).
         */
 
        /* Send a ramrod */
@@ -4215,7 +4157,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
        return rc;
 }
 
-
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
                               struct bnx2x_rss_config_obj *rss_obj,
                               u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
@@ -4288,7 +4229,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
        return !!test_bit(pending_bit, pending);
 }
 
-
 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
                                   struct bnx2x_queue_state_params *params)
 {
@@ -4337,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
        }
 
        if (o->next_tx_only >= o->max_cos)
-               /* >= becuase tx only must always be smaller than cos since the
+               /* >= because tx only must always be smaller than cos since the
                 * primary connection supports COS 0
                 */
                BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
@@ -4403,7 +4343,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
        gen_data->mtu = cpu_to_le16(params->mtu);
        gen_data->func_id = o->func_id;
 
-
        gen_data->cos = params->cos;
 
        gen_data->traffic_type =
@@ -4530,7 +4469,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
                cpu_to_le16(params->silent_removal_value);
        rx_data->silent_vlan_mask =
                cpu_to_le16(params->silent_removal_mask);
-
 }
 
 /* initialize the general, tx and rx parts of a queue object */
@@ -4652,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
        /* Fill the ramrod data */
        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
 
-       /*
-        *  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we would
+        * need to ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read and we will have to put a full memory barrier there
+        * (inside bnx2x_sp_post()).
         */
 
        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
@@ -4681,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
        bnx2x_q_fill_setup_data_e2(bp, params, rdata);
 
-       /*
-        *  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we would
+        * need to ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read and we will have to put a full memory barrier there
+        * (inside bnx2x_sp_post()).
         */
 
        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
@@ -4706,7 +4642,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
                &params->params.tx_only;
        u8 cid_index = tx_only_params->cid_index;
 
-
        if (cid_index >= o->max_cos) {
                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
                          o->cl_id, cid_index);
@@ -4727,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
                         o->cids[cid_index], rdata->general.client_id,
                         rdata->general.sp_client_id, rdata->general.cos);
 
-       /*
-        *  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we would
+        * need to ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read and we will have to put a full memory barrier there
+        * (inside bnx2x_sp_post()).
         */
 
        return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
@@ -4761,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
                test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
                         &params->update_flags);
 
-       /* Outer VLAN sripping */
+       /* Outer VLAN stripping */
        data->outer_vlan_removal_enable_flg =
                test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
        data->outer_vlan_removal_change_flg =
@@ -4816,19 +4750,17 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
                return -EINVAL;
        }
 
-
        /* Clear the ramrod data */
        memset(rdata, 0, sizeof(*rdata));
 
        /* Fill the ramrod data */
        bnx2x_q_fill_update_data(bp, o, update_params, rdata);
 
-       /*
-        *  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we would
+        * need to ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read and we will have to put a full memory barrier there
+        * (inside bnx2x_sp_post()).
         */
 
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
@@ -5038,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
                 &params->params.update;
        u8 next_tx_only = o->num_tx_only;
 
-       /*
-        * Forget all pending for completion commands if a driver only state
+       /* Forget all pending for completion commands if a driver only state
         * transition has been requested.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@@ -5047,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
                o->next_state = BNX2X_Q_STATE_MAX;
        }
 
-       /*
-        * Don't allow a next state transition if we are in the middle of
+       /* Don't allow a next state transition if we are in the middle of
         * the previous one.
         */
        if (o->pending) {
@@ -5257,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
        if (o->pending)
                return BNX2X_F_STATE_MAX;
 
-       /*
-        * unsure the order of reading of o->pending and o->state
+       /* ensure the order of reading of o->pending and o->state
         * o->pending should be read first
         */
        rmb();
@@ -5356,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
        enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
        enum bnx2x_func_cmd cmd = params->cmd;
 
-       /*
-        * Forget all pending for completion commands if a driver only state
+       /* Forget all pending for completion commands if a driver only state
         * transition has been requested.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
@@ -5365,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
                o->next_state = BNX2X_F_STATE_MAX;
        }
 
-       /*
-        * Don't allow a next state transition if we are in the middle of
+       /* Don't allow a next state transition if we are in the middle of
         * the previous one.
         */
        if (o->pending)
@@ -5539,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
                goto init_err;
        }
 
-       /* Handle the beginning of COMMON_XXX pases separatelly... */
+       /* Handle the beginning of COMMON_XXX phases separately... */
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
                rc = bnx2x_func_init_cmn_chip(bp, drv);
@@ -5573,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
 init_err:
        drv->gunzip_end(bp);
 
-       /* In case of success, complete the comand immediatelly: no ramrods
+       /* In case of success, complete the command immediately: no ramrods
         * have been sent.
         */
        if (!rc)
@@ -5598,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp,
 }
 
 /**
- * bnx2x_func_reset_port - reser HW at port stage
+ * bnx2x_func_reset_port - reset HW at port stage
  *
  * @bp:                device handle
  * @drv:
@@ -5620,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp,
 }
 
 /**
- * bnx2x_func_reset_cmn - reser HW at common stage
+ * bnx2x_func_reset_cmn - reset HW at common stage
  *
  * @bp:                device handle
  * @drv:
@@ -5636,7 +5563,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
        drv->reset_hw_cmn(bp);
 }
 
-
 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
                                      struct bnx2x_func_state_params *params)
 {
@@ -5663,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
                break;
        }
 
-       /* Complete the comand immediatelly: no ramrods have been sent. */
+       /* Complete the command immediately: no ramrods have been sent. */
        o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
 
        return 0;
index 43c00bc84a08826368d58e6253f3070a37118a68..798dfe9967336fedc4a5e07806c7216c83a955ed 100644 (file)
@@ -34,8 +34,7 @@ enum {
        RAMROD_RESTORE,
         /* Execute the next command now */
        RAMROD_EXEC,
-       /*
-        * Don't add a new command and continue execution of posponed
+       /* Don't add a new command and continue execution of postponed
         * commands. If not set a new command will be added to the
         * pending commands list.
         */
@@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd {
 struct bnx2x_vlan_mac_data {
        /* Requested command: BNX2X_VLAN_MAC_XX */
        enum bnx2x_vlan_mac_cmd cmd;
-       /*
-        * used to contain the data related vlan_mac_flags bits from
+       /* used to contain the data related vlan_mac_flags bits from
         * ramrod parameters.
         */
        unsigned long vlan_mac_flags;
@@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem *
                                     struct bnx2x_exeq_elem *elem);
 
 struct bnx2x_exe_queue_obj {
-       /*
-        * Commands pending for an execution.
-        */
+       /* Commands pending for execution. */
        struct list_head        exe_queue;
 
-       /*
-        * Commands pending for an completion.
-        */
+       /* Commands pending for completion. */
        struct list_head        pending_comp;
 
        spinlock_t              lock;
@@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj {
 };
 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
 /*
- * Element in the VLAN_MAC registry list having all currenty configured
+ * Element in the VLAN_MAC registry list having all currently configured
  * rules.
  */
 struct bnx2x_vlan_mac_registry_elem {
        struct list_head        link;
 
-       /*
-        * Used to store the cam offset used for the mac/vlan/vlan-mac.
+       /* Used to store the cam offset used for the mac/vlan/vlan-mac.
         * Relevant for 57710 and 57711 only. VLANs and MACs share the
         * same CAM for these chips.
         */
@@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj {
         * @param n number of elements to get
         * @param buf buffer preallocated by caller into which elements
         *            will be copied. Note elements are 4-byte aligned
-        *            so buffer size must be able to accomodate the
+        *            so buffer size must be able to accommodate the
         *            aligned elements.
         *
         * @return number of copied bytes
@@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj {
         * @param bp
         * @param p Command parameters (RAMROD_COMP_WAIT bit in
         *          ramrod_flags is only taken into an account)
-        * @param ppos a pointer to the cooky that should be given back in the
+        * @param ppos a pointer to the cookie that should be given back in the
         *        next call to make function handle the next element. If
         *        *ppos is set to NULL it will restart the iterator.
         *        If returned *ppos == NULL this means that the last
@@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj {
                       struct bnx2x_vlan_mac_registry_elem **ppos);
 
        /**
-        * Should be called on a completion arival.
+        * Should be called on a completion arrival.
         *
         * @param bp
         * @param o
@@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp,
 
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 
-/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
  * a bnx2x_rx_mode_ramrod_params.
  */
 enum {
@@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params {
        unsigned long ramrod_flags;
        unsigned long rx_mode_flags;
 
-       /*
-        * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+       /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
         * a tstorm_eth_mac_filter_config (e1x).
         */
        void *rdata;
@@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj {
        /* Maximum allowed credit. put() will check against it. */
        int             pool_sz;
 
-       /*
-        *  Allocate a pool table statically.
+       /* Allocate a pool table statically.
         *
-        *  Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272)
+        * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
         *
-        *  The set bit in the table will mean that the entry is available.
+        * The set bit in the table will mean that the entry is available.
         */
 #define BNX2X_POOL_VEC_SIZE    (MAX_MAC_CREDIT_E2 / 64)
        u64             pool_mirror[BNX2X_POOL_VEC_SIZE];
@@ -832,7 +823,7 @@ enum {
        BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
 };
 
-/* Queue type options: queue type may be a compination of below. */
+/* Queue type options: queue type may be a combination of below. */
 enum bnx2x_q_type {
        /** TODO: Consider moving both these flags into the init()
         *        ramrod params.
@@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj {
        u8              cl_id;
        u8              func_id;
 
-       /*
-        * number of traffic classes supported by queue.
-        * The primary connection of the queue suppotrs the first traffic
-        * class. Any further traffic class is suppoted by a tx-only
+       /* number of traffic classes supported by queue.
+        * The primary connection of the queue supports the first traffic
+        * class. Any further traffic class is supported by a tx-only
         * connection.
         *
         * Therefore max_cos is also a number of valid entries in the cids
@@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj {
 
        /* BNX2X_Q_CMD_XX bits. This object implements "one
         * pending" paradigm but for debug and tracing purposes it's
-        * more convinient to have different bits for different
+        * more convenient to have different bits for different
         * commands.
         */
        unsigned long   pending;
@@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj {
 
        /* BNX2X_FUNC_CMD_XX bits. This object implements "one
         * pending" paradigm but for debug and tracing purposes it's
-        * more convinient to have different bits for different
+        * more convenient to have different bits for different
         * commands.
         */
        unsigned long           pending;
@@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
  *
  * @p: Command parameters
  *
- * Return: 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successful and there are no pending completions,
  *         positive number - if there are pending completions,
  *         negative - if there were errors
  */
@@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
  * the current command will be enqueued to the tail of the
  * pending commands list.
  *
- * Return: 0 is operation was successfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
  *         negative if there were errors, positive if there are pending
  *         completions.
  */
@@ -1377,7 +1367,6 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
                                 u8 func_num);
 
-
 /****************** RSS CONFIGURATION ****************/
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
                               struct bnx2x_rss_config_obj *rss_obj,
index 2ce7c7471367812bc88375ab600211c7b2a437db..95861efb505187f07bd1a176640e4f44af14a5de 100644 (file)
@@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
  */
 
 /* internal vf enable - until vf is enabled internally all transactions
- * are blocked. this routine should always be called last with pretend.
+ * are blocked. This routine should always be called last with pretend.
  */
 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
 {
@@ -1459,21 +1459,16 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 
        if (!vf)
-               goto unknown_dev;
+               return false;
 
        dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
        if (dev)
                return bnx2x_is_pcie_pending(dev);
-
-unknown_dev:
        return false;
 }
 
 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
 {
-       /* Wait 100ms */
-       msleep(100);
-
        /* Verify no pending pci transactions */
        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
                BNX2X_ERR("PCIE Transactions still pending\n");
@@ -1620,7 +1615,7 @@ next_vf_to_clean:
             i++)
                ;
 
-       DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
+       DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
           BNX2X_NR_VIRTFN(bp));
 
        if (i < BNX2X_NR_VIRTFN(bp)) {
@@ -1743,7 +1738,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
 
-       /* set the number of VF alllowed doorbells to the full DQ range */
+       /* set the number of VF allowed doorbells to the full DQ range */
        REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
 
        /* set the VF doorbell threshold */
@@ -2176,6 +2171,9 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 
        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
 
+       /* let FLR complete ... */
+       msleep(100);
+
        /* initialize vf database */
        for_each_vf(bp, vfid) {
                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
@@ -2403,7 +2401,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 
        /* extract vf and rxq index from vf_cid - relies on the following:
         * 1. vfid on cid reflects the true abs_vfid
-        * 2. the max number of VFs (per path) is 64
+        * 2. The max number of VFs (per path) is 64
         */
        qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
        abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
@@ -2461,7 +2459,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
 {
        /* extract the vf from vf_cid - relies on the following:
         * 1. vfid on cid reflects the true abs_vfid
-        * 2. the max number of VFs (per path) is 64
+        * 2. The max number of VFs (per path) is 64
         */
        int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
        return bnx2x_vf_by_abs_fid(bp, abs_vfid);
@@ -2480,7 +2478,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
        if (vf) {
                /* extract queue index from vf_cid - relies on the following:
                 * 1. vfid on cid reflects the true abs_vfid
-                * 2. the max number of VFs (per path) is 64
+                * 2. The max number of VFs (per path) is 64
                 */
                int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
                *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
@@ -2705,7 +2703,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
        }
 
        /* static allocation:
-        * the global maximum number are fixed per VF. fail the request if
+        * the global maximum numbers are fixed per VF. Fail the request if
         * requested number exceed these globals
         */
        if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
@@ -2777,6 +2775,10 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
                   vf->abs_vfid, vf->state);
                return -EINVAL;
        }
+
+       /* let FLR complete ... */
+       msleep(100);
+
        /* FLR cleanup epilogue */
        if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
                return -EBUSY;
@@ -2890,7 +2892,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp,
        return -ENOMEM;
 }
 
-/* VF release can be called either: 1. the VF was acquired but
+/* VF release can be called either: 1. The VF was acquired but
  * not enabled 2. the vf was enabled or in the process of being
  * enabled
  */
@@ -3024,7 +3026,6 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 {
-
        struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
 
        DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
@@ -3032,7 +3033,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 
        /* HW channel is only operational when PF is up */
        if (bp->state != BNX2X_STATE_OPEN) {
-               BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
+               BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
                return -EINVAL;
        }
 
@@ -3086,6 +3087,11 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
 static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
                               struct bnx2x_virtf *vf)
 {
+       if (bp->state != BNX2X_STATE_OPEN) {
+               BNX2X_ERR("vf ndo called though PF is down\n");
+               return -EINVAL;
+       }
+
        if (!IS_SRIOV(bp)) {
                BNX2X_ERR("vf ndo called though sriov is disabled\n");
                return -EINVAL;
@@ -3141,7 +3147,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                        /* mac configured by ndo so its in bulletin board */
                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
                else
-                       /* funtion has not been loaded yet. Show mac as 0s */
+                       /* function has not been loaded yet. Show mac as 0s */
                        memset(&ivi->mac, 0, ETH_ALEN);
 
                /* vlan */
@@ -3149,7 +3155,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                        /* vlan configured by ndo so its in bulletin board */
                        memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
                else
-                       /* funtion has not been loaded yet. Show vlans as 0s */
+                       /* function has not been loaded yet. Show vlans as 0s */
                        memset(&ivi->vlan, 0, VLAN_HLEN);
        }
 
@@ -3189,7 +3195,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                return -EINVAL;
        }
 
-       /* update PF's copy of the VF's bulletin. will no longer accept mac
+       /* update PF's copy of the VF's bulletin. Will no longer accept mac
         * configuration requests from vf unless match this mac
         */
        bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
@@ -3358,8 +3364,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
        return 0;
 }
 
-/* crc is the first field in the bulletin board. compute the crc over the
- * entire bulletin board excluding the crc field itself
+/* crc is the first field in the bulletin board. Compute the crc over the
+ * entire bulletin board excluding the crc field itself. Use the length field
+ * as the Bulletin Board was posted by a PF with possibly a different version
+ * from the vf which will sample it. Therefore, the length is computed by the
+ * PF and then used blindly by the VF.
  */
 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
                          struct pf_vf_bulletin_content *bulletin)
@@ -3389,7 +3398,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
                        if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
                                                                  &bulletin))
                                break;
-                       BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
+                       BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
                                  bulletin.crc,
                                  bnx2x_crc_vf_bulletin(bp, &bulletin));
                }
@@ -3417,6 +3426,20 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
        return PFVF_BULLETIN_UPDATED;
 }
 
+void bnx2x_timer_sriov(struct bnx2x *bp)
+{
+       bnx2x_sample_bulletin(bp);
+
+       /* if channel is down we need to self destruct */
+       if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
+               smp_mb__before_clear_bit();
+               set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+                       &bp->sp_rtnl_state);
+               smp_mb__after_clear_bit();
+               schedule_delayed_work(&bp->sp_rtnl_task, 0);
+       }
+}
+
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 {
        /* vf doorbells are embedded within the regview */
@@ -3452,7 +3475,7 @@ int bnx2x_open_epilog(struct bnx2x *bp)
         * register_netdevice which must have rtnl lock taken. As we are holding
         * the lock right now, that could only work if the probe would not take
         * the lock. However, as the probe of the vf may be called from other
-        * contexts as well (such as passthrough to vm failes) it can't assume
+        * contexts as well (such as passthrough to vm fails) it can't assume
         * the lock is being held for it. Using delayed work here allows the
         * probe code to simply take the lock (i.e. wait for it to be released
         * if it is being held). We only want to do this if the number of VFs
@@ -3467,3 +3490,23 @@ int bnx2x_open_epilog(struct bnx2x *bp)
 
        return 0;
 }
+
+void bnx2x_iov_channel_down(struct bnx2x *bp)
+{
+       int vf_idx;
+       struct pf_vf_bulletin_content *bulletin;
+
+       if (!IS_SRIOV(bp))
+               return;
+
+       for_each_vf(bp, vf_idx) {
+               /* locate this VFs bulletin board and update the channel down
+                * bit
+                */
+               bulletin = BP_VF_BULLETIN(bp, vf_idx);
+               bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
+
+               /* update vf bulletin board */
+               bnx2x_post_vf_bulletin(bp, vf_idx);
+       }
+}
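
The crc comment earlier in this file's diff states that the checksum covers the whole bulletin board except the leading crc field, and that the VF blindly trusts the length field the PF wrote into the board. A minimal sketch of such a computation follows; it is not patch code, and the BULLETIN_CRC_SEED constant and the use of the kernel's crc32() helper are assumptions of the sketch only.

#include <linux/crc32.h>

/* sketch: CRC over the bulletin, skipping the crc field itself and
 * covering bulletin->length bytes as posted by the PF (assumed helper)
 */
static u32 sketch_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     (u8 *)bulletin + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}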
index d67ddc554c0f27065c7912996230d2aafb6ea89f..d143a7cdbbbedeb78fa831f1930f6228cfcc7e3d 100644 (file)
@@ -197,7 +197,7 @@ struct bnx2x_virtf {
 
        u8 state;
 #define VF_FREE                0       /* VF ready to be acquired holds no resc */
-#define VF_ACQUIRED    1       /* VF aquired, but not initalized */
+#define VF_ACQUIRED    1       /* VF acquired, but not initialized */
 #define VF_ENABLED     2       /* VF Enabled */
 #define VF_RESET       3       /* VF FLR'd, pending cleanup */
 
@@ -496,7 +496,7 @@ enum {
                else if ((next) == VFOP_VERIFY_PEND)                    \
                        BNX2X_ERR("expected pending\n");                \
                else {                                                  \
-                       DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n");   \
+                       DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n");   \
                        atomic_set(&vf->op_in_progress, 1);             \
                        queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);  \
                        return;                                         \
@@ -722,7 +722,6 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
                          struct pf_vf_bulletin_content *bulletin);
 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
 
-
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
 
 /* VF side vfpf channel functions */
@@ -752,6 +751,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
 }
 
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+void bnx2x_timer_sriov(struct bnx2x *bp);
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
 int bnx2x_vf_pci_alloc(struct bnx2x *bp);
 int bnx2x_enable_sriov(struct bnx2x *bp);
@@ -762,6 +762,7 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 }
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+void bnx2x_iov_channel_down(struct bnx2x *bp);
 int bnx2x_open_epilog(struct bnx2x *bp);
 
 #else /* CONFIG_BNX2X_SRIOV */
@@ -809,6 +810,7 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
 {
        return PFVF_BULLETIN_UNCHANGED;
 }
+static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}
 
 static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 {
@@ -818,6 +820,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
 static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
 
 #endif /* CONFIG_BNX2X_SRIOV */
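
The new bnx2x_timer_sriov() and bnx2x_iov_channel_down() entry points follow the header's existing convention: real prototypes when CONFIG_BNX2X_SRIOV is set, empty static inline stubs otherwise, so callers (for example the periodic timer path) can invoke them unconditionally. Condensed from the hunks above:

#ifdef CONFIG_BNX2X_SRIOV
void bnx2x_timer_sriov(struct bnx2x *bp);
void bnx2x_iov_channel_down(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
#endif /* CONFIG_BNX2X_SRIOV */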
index 2ca3d94fcec2ba5f5f11db647d4454ddeafa34ad..98366abd02bda520cf1876189cced5926175288f 100644 (file)
@@ -1002,7 +1002,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
                qstats->valid_bytes_received_lo =
                                        qstats->total_bytes_received_lo;
 
-
                UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
                                        total_unicast_packets_received);
                UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
index d117f472816c39effa4f48f4928f52ff373436ba..853824d258e88d3b75f277fbb482357445f3260f 100644 (file)
@@ -40,7 +40,6 @@ struct nig_stats {
        u32 egress_mac_pkt1_hi;
 };
 
-
 enum bnx2x_stats_event {
        STATS_EVENT_PMF = 0,
        STATS_EVENT_LINK_UP,
@@ -208,7 +207,6 @@ struct bnx2x_eth_stats {
        u32 eee_tx_lpi;
 };
 
-
 struct bnx2x_eth_q_stats {
        u32 total_unicast_bytes_received_hi;
        u32 total_unicast_bytes_received_lo;
@@ -331,7 +329,6 @@ struct bnx2x_fw_port_stats_old {
         u32 mac_discard;
 };
 
-
 /****************************************************************************
 * Macros
 ****************************************************************************/
@@ -536,7 +533,6 @@ struct bnx2x_fw_port_stats_old {
                SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
        } while (0)
 
-
 /* forward */
 struct bnx2x;
 
index 928b074d7d80bdf9b71ea2a7e5121d9c393050ba..2088063151d60ab72f59650ea36765acb8710291 100644 (file)
@@ -113,7 +113,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
 {
        struct cstorm_vf_zone_data __iomem *zone_data =
                REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
-       int tout = 600, interval = 100; /* wait for 60 seconds */
+       int tout = 100, interval = 100; /* wait for 10 seconds */
 
        if (*done) {
                BNX2X_ERR("done was non zero before message to pf was sent\n");
@@ -121,6 +121,16 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
                return -EINVAL;
        }
 
+       /* if PF indicated channel is down avoid sending message. Return success
+        * so calling flow can continue
+        */
+       bnx2x_sample_bulletin(bp);
+       if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
+               DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
+               *done = PFVF_STATUS_SUCCESS;
+               return 0;
+       }
+
        /* Write message address */
        writel(U64_LO(msg_mapping),
               &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
@@ -233,7 +243,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 
                attempts++;
 
-               /* test whether the PF accepted our request. If not, humble the
+               /* test whether the PF accepted our request. If not, humble
                 * the request and try again.
                 */
                if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
@@ -333,7 +343,7 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
                DP(BNX2X_MSG_SP, "vf released\n");
        } else {
                /* PF reports error */
-               BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
+               BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
                          resp->hdr.status);
                rc = -EAGAIN;
                goto out;
@@ -787,7 +797,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
                storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
 }
 
-/* enable vf_pf mailbox (aka vf-pf-chanell) */
+/* enable vf_pf mailbox (aka vf-pf-channel) */
 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
 {
        bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
@@ -844,7 +854,6 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
                dmae.dst_addr_hi = vf_addr_hi;
        }
        dmae.len = len32;
-       bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);
 
        /* issue the command and wait for completion */
        return bnx2x_issue_dmae_with_comp(bp, &dmae);
@@ -1072,7 +1081,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
        if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
                __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
 
-       /* outer vlan removal is set according to the PF's multi fuction mode */
+       /* outer vlan removal is set according to PF's multi function mode */
        if (IS_MF_SD(bp))
                __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
 }
@@ -1104,7 +1113,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                struct bnx2x_queue_init_params *init_p;
                struct bnx2x_queue_setup_params *setup_p;
 
-               /* reinit the VF operation context */
+               /* re-init the VF operation context */
                memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
                setup_p = &vf->op_params.qctor.prep_qsetup;
                init_p =  &vf->op_params.qctor.qstate.params.init;
@@ -1588,8 +1597,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                 * support them. Or this may be because someone wrote a crappy
                 * VF driver and is sending garbage over the channel.
                 */
-               BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
-                         mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+               BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
+                         mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
+                         vf->state);
                for (i = 0; i < 20; i++)
                        DP_CONT(BNX2X_MSG_IOV, "%x ",
                                mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
@@ -1605,8 +1615,11 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        bnx2x_vf_mbx_resp(bp, vf);
                } else {
                        /* can't send a response since this VF is unknown to us
-                        * just unlock the channel and be done with.
+                        * just ack the FW to release the mailbox and unlock
+                        * the channel.
                         */
+                       storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+                       mmiowb();
                        bnx2x_unlock_vf_pf_channel(bp, vf,
                                                   mbx->first_tlv.tl.type);
                }
index 41708faab5752ac00f4f59edcceebf8545f59da3..f3ad174a3a63b622e53e4355ca335eeeabd972af 100644 (file)
@@ -331,7 +331,10 @@ struct pf_vf_bulletin_content {
 #define VLAN_VALID             1       /* when set, the vf should not access
                                         * the vfpf channel
                                         */
-
+#define CHANNEL_DOWN           2       /* vfpf channel is disabled. VFs are not
+                                        * to attempt to send messages on the
+                                        * channel after this bit is set
+                                        */
        u8 mac[ETH_ALEN];
        u8 mac_padding[2];
 
index 6b0dc131b20ea3fd057fdd8c1fa98243ecbdf42c..d78d4cf140ed6d20a0e513d3c974740cf6c145cd 100644 (file)
@@ -5622,7 +5622,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
                                                         void *ptr)
 {
-       struct net_device *netdev = ptr;
+       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct cnic_dev *dev;
        int new_dev = 0;
 
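
The cnic hunk tracks the netdev notifier API change: notifier callbacks now receive a struct netdev_notifier_info pointer rather than the net_device itself, and netdev_notifier_info_to_dev() extracts the device from it. A minimal sketch of a callback written against that convention (example names, not from this patch):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	/* ptr carries a struct netdev_notifier_info; pull out the device */
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);

	return NOTIFY_DONE;
}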
index e80bfb60c3efb922a76149ce81b9a120c60fe9ce..c2777712da991dd72b876eac74fb991c73b12010 100644 (file)
@@ -2197,7 +2197,7 @@ static const struct net_device_ops sbmac_netdev_ops = {
 
 static int sbmac_init(struct platform_device *pldev, long long base)
 {
-       struct net_device *dev = dev_get_drvdata(&pldev->dev);
+       struct net_device *dev = platform_get_drvdata(pldev);
        int idx = pldev->id;
        struct sbmac_softc *sc = netdev_priv(dev);
        unsigned char *eaddr;
@@ -2275,7 +2275,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
                       dev->name);
                goto free_mdio;
        }
-       dev_set_drvdata(&pldev->dev, sc->mii_bus);
+       platform_set_drvdata(pldev, sc->mii_bus);
 
        err = register_netdev(dev);
        if (err) {
@@ -2300,7 +2300,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
        return 0;
 unreg_mdio:
        mdiobus_unregister(sc->mii_bus);
-       dev_set_drvdata(&pldev->dev, NULL);
 free_mdio:
        mdiobus_free(sc->mii_bus);
 uninit_ctx:
@@ -2624,7 +2623,7 @@ static int sbmac_probe(struct platform_device *pldev)
                goto out_unmap;
        }
 
-       dev_set_drvdata(&pldev->dev, dev);
+       platform_set_drvdata(pldev, dev);
        SET_NETDEV_DEV(dev, &pldev->dev);
 
        sc = netdev_priv(dev);
@@ -2649,7 +2648,7 @@ out_out:
 
 static int __exit sbmac_remove(struct platform_device *pldev)
 {
-       struct net_device *dev = dev_get_drvdata(&pldev->dev);
+       struct net_device *dev = platform_get_drvdata(pldev);
        struct sbmac_softc *sc = netdev_priv(dev);
 
        unregister_netdev(dev);
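
The sbmac changes are a mechanical conversion to the platform drvdata helpers, which are thin wrappers around the generic device drvdata accessors, so behaviour is unchanged; the wrappers are roughly as sketched below (paraphrasing include/linux/platform_device.h). Dropping the explicit dev_set_drvdata(..., NULL) on the error path is consistent with the driver core clearing drvdata itself on probe failure and unbind.

static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

static inline void platform_set_drvdata(struct platform_device *pdev, void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}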
index 728d42ab2a7636e4656a28174ac2b8c9159c60f0..986df04fdcb36e2cee6d3bef0b2e5aaf03711988 100644 (file)
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    131
+#define TG3_MIN_NUM                    132
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "April 09, 2013"
+#define DRV_MODULE_RELDATE     "May 21, 2013"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -965,9 +965,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
 
                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
-       case RESET_KIND_SUSPEND:
-               event = APE_EVENT_STATUS_STATE_SUSPEND;
-               break;
        default:
                return;
        }
@@ -1314,8 +1311,8 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
 
        if (err)
                return err;
-       if (enable)
 
+       if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
@@ -1739,10 +1736,6 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
                        break;
                }
        }
-
-       if (kind == RESET_KIND_INIT ||
-           kind == RESET_KIND_SUSPEND)
-               tg3_ape_driver_state_change(tp, kind);
 }
 
 /* tp->lock is held. */
@@ -1764,9 +1757,6 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
                        break;
                }
        }
-
-       if (kind == RESET_KIND_SHUTDOWN)
-               tg3_ape_driver_state_change(tp, kind);
 }
 
 /* tp->lock is held. */
@@ -1800,6 +1790,9 @@ static int tg3_poll_fw(struct tg3 *tp)
        int i;
        u32 val;
 
+       if (tg3_flag(tp, NO_FWARE_REPORTED))
+               return 0;
+
        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* We don't use firmware. */
                return 0;
@@ -2320,6 +2313,46 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
        tg3_phy_toggle_auxctl_smdsp(tp, false);
 }
 
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+{
+       u32 val;
+       struct ethtool_eee *dest = &tp->eee;
+
+       if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+               return;
+
+       if (eee)
+               dest = eee;
+
+       if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
+               return;
+
+       /* Pull eee_active */
+       if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+           val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
+               dest->eee_active = 1;
+       } else
+               dest->eee_active = 0;
+
+       /* Pull lp advertised settings */
+       if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
+               return;
+       dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+
+       /* Pull advertised and eee_enabled settings */
+       if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
+               return;
+       dest->eee_enabled = !!val;
+       dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+
+       /* Pull tx_lpi_enabled */
+       val = tr32(TG3_CPMU_EEE_MODE);
+       dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
+
+       /* Pull lpi timer value */
+       dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
+}
+
 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
 {
        u32 val;
@@ -2343,11 +2376,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
 
                tw32(TG3_CPMU_EEE_CTRL, eeectl);
 
-               tg3_phy_cl45_read(tp, MDIO_MMD_AN,
-                                 TG3_CL45_D7_EEERES_STAT, &val);
-
-               if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
-                   val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+               tg3_eee_pull_config(tp, NULL);
+               if (tp->eee.eee_active)
                        tp->setlpicnt = 2;
        }
 
@@ -2957,6 +2987,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
        return 0;
 }
 
+static bool tg3_phy_power_bug(struct tg3 *tp)
+{
+       switch (tg3_asic_rev(tp)) {
+       case ASIC_REV_5700:
+       case ASIC_REV_5704:
+               return true;
+       case ASIC_REV_5780:
+               if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+                       return true;
+               return false;
+       case ASIC_REV_5717:
+               if (!tp->pci_fn)
+                       return true;
+               return false;
+       case ASIC_REV_5719:
+       case ASIC_REV_5720:
+               if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+                   !tp->pci_fn)
+                       return true;
+               return false;
+       }
+
+       return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
        u32 val;
@@ -3016,12 +3071,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
-       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
-           tg3_asic_rev(tp) == ASIC_REV_5704 ||
-           (tg3_asic_rev(tp) == ASIC_REV_5780 &&
-            (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
-           (tg3_asic_rev(tp) == ASIC_REV_5717 &&
-            !tp->pci_fn))
+       if (tg3_phy_power_bug(tp))
                return;
 
        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
@@ -4149,6 +4199,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
 
        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
 
+       tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
+
        return 0;
 }
 
@@ -4249,6 +4301,16 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                /* Advertise 1000-BaseT EEE ability */
                if (advertise & ADVERTISED_1000baseT_Full)
                        val |= MDIO_AN_EEE_ADV_1000T;
+
+               if (!tp->eee.eee_enabled) {
+                       val = 0;
+                       tp->eee.advertised = 0;
+               } else {
+                       tp->eee.advertised = advertise &
+                                            (ADVERTISED_100baseT_Full |
+                                             ADVERTISED_1000baseT_Full);
+               }
+
                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
                if (err)
                        val = 0;
@@ -4493,26 +4555,23 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
 
 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
 {
-       u32 val;
-       u32 tgtadv = 0;
-       u32 advertising = tp->link_config.advertising;
+       struct ethtool_eee eee;
 
        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return true;
 
-       if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
-               return false;
-
-       val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
-
+       tg3_eee_pull_config(tp, &eee);
 
-       if (advertising & ADVERTISED_100baseT_Full)
-               tgtadv |= MDIO_AN_EEE_ADV_100TX;
-       if (advertising & ADVERTISED_1000baseT_Full)
-               tgtadv |= MDIO_AN_EEE_ADV_1000T;
-
-       if (val != tgtadv)
-               return false;
+       if (tp->eee.eee_enabled) {
+               if (tp->eee.advertised != eee.advertised ||
+                   tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
+                   tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
+                       return false;
+       } else {
+               /* EEE is disabled but we're advertising */
+               if (eee.advertised)
+                       return false;
+       }
 
        return true;
 }
@@ -4613,6 +4672,42 @@ static void tg3_clear_mac_status(struct tg3 *tp)
        udelay(40);
 }
 
+static void tg3_setup_eee(struct tg3 *tp)
+{
+       u32 val;
+
+       val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+             TG3_CPMU_EEE_LNKIDL_UART_IDL;
+       if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+               val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
+
+       tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
+
+       tw32_f(TG3_CPMU_EEE_CTRL,
+              TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+       val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+             (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
+             TG3_CPMU_EEEMD_LPI_IN_RX |
+             TG3_CPMU_EEEMD_EEE_ENABLE;
+
+       if (tg3_asic_rev(tp) != ASIC_REV_5717)
+               val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+       if (tg3_flag(tp, ENABLE_APE))
+               val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+       tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
+
+       tw32_f(TG3_CPMU_EEE_DBTMR1,
+              TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+              (tp->eee.tx_lpi_timer & 0xffff));
+
+       tw32_f(TG3_CPMU_EEE_DBTMR2,
+              TG3_CPMU_DBTMR2_APE_TX_2047US |
+              TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+}
+
 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
 {
        bool current_link_up;
@@ -4779,8 +4874,10 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
                         */
                        if (!eee_config_ok &&
                            (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
-                           !force_reset)
+                           !force_reset) {
+                               tg3_setup_eee(tp);
                                tg3_phy_reset(tp);
+                       }
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
@@ -6292,9 +6389,7 @@ static void tg3_tx_recover(struct tg3 *tp)
                    "Please report the problem to the driver maintainer "
                    "and include system chipset information.\n");
 
-       spin_lock(&tp->lock);
        tg3_flag_set(tp, TX_RECOVERY_PENDING);
-       spin_unlock(&tp->lock);
 }
 
 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
@@ -7428,6 +7523,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
        return (base > 0xffffdcc0) && (base + len + 8 < base);
 }
 
+/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
+ * of any 4GB boundaries: 4G, 8G, etc
+ */
+static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+                                          u32 len, u32 mss)
+{
+       if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
+               u32 base = (u32) mapping & 0xffffffff;
+
+               return ((base + len + (mss & 0x3fff)) < base);
+       }
+       return 0;
+}
+
 /* Test for DMA addresses > 40-bit */
 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
@@ -7464,6 +7573,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
        if (tg3_4g_overflow_test(map, len))
                hwbug = true;
 
+       if (tg3_4g_tso_overflow_test(tp, map, len, mss))
+               hwbug = true;
+
        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;
 
@@ -8874,6 +8986,10 @@ static int tg3_chip_reset(struct tg3 *tp)
                tg3_halt_cpu(tp, RX_CPU_BASE);
        }
 
+       err = tg3_poll_fw(tp);
+       if (err)
+               return err;
+
        tw32(GRC_MODE, tp->grc_mode);
 
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
@@ -8904,10 +9020,6 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
 
-       err = tg3_poll_fw(tp);
-       if (err)
-               return err;
-
        tg3_mdio_start(tp);
 
        if (tg3_flag(tp, PCI_EXPRESS) &&
@@ -9129,11 +9241,9 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 }
 
 /* tp->lock is held. */
-static void tg3_rings_reset(struct tg3 *tp)
+static void tg3_tx_rcbs_disable(struct tg3 *tp)
 {
-       int i;
-       u32 stblk, txrcb, rxrcb, limit;
-       struct tg3_napi *tnapi = &tp->napi[0];
+       u32 txrcb, limit;
 
        /* Disable all transmit rings but the first. */
        if (!tg3_flag(tp, 5705_PLUS))
@@ -9150,7 +9260,33 @@ static void tg3_rings_reset(struct tg3 *tp)
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);
+}
+
+/* tp->lock is held. */
+static void tg3_tx_rcbs_init(struct tg3 *tp)
+{
+       int i = 0;
+       u32 txrcb = NIC_SRAM_SEND_RCB;
+
+       if (tg3_flag(tp, ENABLE_TSS))
+               i++;
+
+       for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
+               if (!tnapi->tx_ring)
+                       continue;
+
+               tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+                              (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
+                              NIC_SRAM_TX_BUFFER_DESC);
+       }
+}
 
+/* tp->lock is held. */
+static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
+{
+       u32 rxrcb, limit;
 
        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
@@ -9168,6 +9304,39 @@ static void tg3_rings_reset(struct tg3 *tp)
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);
+}
+
+/* tp->lock is held. */
+static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
+{
+       int i = 0;
+       u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+       if (tg3_flag(tp, ENABLE_RSS))
+               i++;
+
+       for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
+               if (!tnapi->rx_rcb)
+                       continue;
+
+               tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+                              (tp->rx_ret_ring_mask + 1) <<
+                               BDINFO_FLAGS_MAXLEN_SHIFT, 0);
+       }
+}
+
+/* tp->lock is held. */
+static void tg3_rings_reset(struct tg3 *tp)
+{
+       int i;
+       u32 stblk;
+       struct tg3_napi *tnapi = &tp->napi[0];
+
+       tg3_tx_rcbs_disable(tp);
+
+       tg3_rx_ret_rcbs_disable(tp);
 
        /* Disable interrupts */
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);
@@ -9204,9 +9373,6 @@ static void tg3_rings_reset(struct tg3 *tp)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }
 
-       txrcb = NIC_SRAM_SEND_RCB;
-       rxrcb = NIC_SRAM_RCV_RET_RCB;
-
        /* Clear status block in ram. */
        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 
@@ -9216,46 +9382,20 @@ static void tg3_rings_reset(struct tg3 *tp)
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tnapi->status_mapping & 0xffffffff));
 
-       if (tnapi->tx_ring) {
-               tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
-                              (TG3_TX_RING_SIZE <<
-                               BDINFO_FLAGS_MAXLEN_SHIFT),
-                              NIC_SRAM_TX_BUFFER_DESC);
-               txrcb += TG3_BDINFO_SIZE;
-       }
-
-       if (tnapi->rx_rcb) {
-               tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
-                              (tp->rx_ret_ring_mask + 1) <<
-                               BDINFO_FLAGS_MAXLEN_SHIFT, 0);
-               rxrcb += TG3_BDINFO_SIZE;
-       }
-
        stblk = HOSTCC_STATBLCK_RING1;
 
        for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
                u64 mapping = (u64)tnapi->status_mapping;
                tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
                tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+               stblk += 8;
 
                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
-
-               if (tnapi->tx_ring) {
-                       tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
-                                      (TG3_TX_RING_SIZE <<
-                                       BDINFO_FLAGS_MAXLEN_SHIFT),
-                                      NIC_SRAM_TX_BUFFER_DESC);
-                       txrcb += TG3_BDINFO_SIZE;
-               }
-
-               tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
-                              ((tp->rx_ret_ring_mask + 1) <<
-                               BDINFO_FLAGS_MAXLEN_SHIFT), 0);
-
-               stblk += 8;
-               rxrcb += TG3_BDINFO_SIZE;
        }
+
+       tg3_tx_rcbs_init(tp);
+       tg3_rx_ret_rcbs_init(tp);
 }
 
 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
@@ -9431,6 +9571,14 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
        }
 }
 
+static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
+{
+       if (tg3_asic_rev(tp) == ASIC_REV_5719)
+               return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
+       else
+               return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
+}
+
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 {
@@ -9447,46 +9595,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
        if (tg3_flag(tp, INIT_COMPLETE))
                tg3_abort_hw(tp, 1);
 
-       /* Enable MAC control of LPI */
-       if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
-               val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
-                     TG3_CPMU_EEE_LNKIDL_UART_IDL;
-               if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
-                       val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
-
-               tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
-
-               tw32_f(TG3_CPMU_EEE_CTRL,
-                      TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
-
-               val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
-                     TG3_CPMU_EEEMD_LPI_IN_TX |
-                     TG3_CPMU_EEEMD_LPI_IN_RX |
-                     TG3_CPMU_EEEMD_EEE_ENABLE;
-
-               if (tg3_asic_rev(tp) != ASIC_REV_5717)
-                       val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
-
-               if (tg3_flag(tp, ENABLE_APE))
-                       val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
-
-               tw32_f(TG3_CPMU_EEE_MODE, val);
-
-               tw32_f(TG3_CPMU_EEE_DBTMR1,
-                      TG3_CPMU_DBTMR1_PCIEXIT_2047US |
-                      TG3_CPMU_DBTMR1_LNKIDLE_2047US);
-
-               tw32_f(TG3_CPMU_EEE_DBTMR2,
-                      TG3_CPMU_DBTMR2_APE_TX_2047US |
-                      TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
-       }
-
        if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
            !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
                tg3_phy_pull_config(tp);
+               tg3_eee_pull_config(tp, NULL);
                tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
        }
 
+       /* Enable MAC control of LPI */
+       if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+               tg3_setup_eee(tp);
+
        if (reset_phy)
                tg3_phy_reset(tp);
 
@@ -10116,16 +10235,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
        tw32_f(RDMAC_MODE, rdmac_mode);
        udelay(40);
 
-       if (tg3_asic_rev(tp) == ASIC_REV_5719) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720) {
                for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
                        if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
                                break;
                }
                if (i < TG3_NUM_RDMA_CHANNELS) {
                        val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
-                       val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
+                       val |= tg3_lso_rd_dma_workaround_bit(tp);
                        tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
-                       tg3_flag_set(tp, 5719_RDMA_BUG);
+                       tg3_flag_set(tp, 5719_5720_RDMA_BUG);
                }
        }
 
@@ -10358,6 +10478,13 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
  */
 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
 {
+       /* Chip may have been just powered on. If so, the boot code may still
+        * be running initialization. Wait for it to finish to avoid races in
+        * accessing the hardware.
+        */
+       tg3_enable_register_access(tp);
+       tg3_poll_fw(tp);
+
        tg3_switch_clocks(tp);
 
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
@@ -10489,15 +10616,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
-       if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
+       if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
                     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
                      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
                u32 val;
 
                val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
-               val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
+               val &= ~tg3_lso_rd_dma_workaround_bit(tp);
                tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
-               tg3_flag_clear(tp, 5719_RDMA_BUG);
+               tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
        }
 
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
@@ -11134,7 +11261,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
         */
        err = tg3_alloc_consistent(tp);
        if (err)
-               goto err_out1;
+               goto out_ints_fini;
 
        tg3_napi_init(tp);
 
@@ -11148,12 +11275,15 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
                                tnapi = &tp->napi[i];
                                free_irq(tnapi->irq_vec, tnapi);
                        }
-                       goto err_out2;
+                       goto out_napi_fini;
                }
        }
 
        tg3_full_lock(tp, 0);
 
+       if (init)
+               tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+
        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -11163,7 +11293,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
        tg3_full_unlock(tp);
 
        if (err)
-               goto err_out3;
+               goto out_free_irq;
 
        if (test_irq && tg3_flag(tp, USING_MSI)) {
                err = tg3_test_msi(tp);
@@ -11174,7 +11304,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
                        tg3_free_rings(tp);
                        tg3_full_unlock(tp);
 
-                       goto err_out2;
+                       goto out_napi_fini;
                }
 
                if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
@@ -11214,18 +11344,18 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
 
        return 0;
 
-err_out3:
+out_free_irq:
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }
 
-err_out2:
+out_napi_fini:
        tg3_napi_disable(tp);
        tg3_napi_fini(tp);
        tg3_free_consistent(tp);
 
-err_out1:
+out_ints_fini:
        tg3_ints_fini(tp);
 
        return err;
@@ -13270,11 +13400,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
 
-       if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
-           tg3_power_up(tp)) {
-               etest->flags |= ETH_TEST_FL_FAILED;
-               memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
-               return;
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+               if (tg3_power_up(tp)) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+                       return;
+               }
+               tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        }
 
        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -13565,6 +13697,57 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
        return 0;
 }
 
+static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+               netdev_warn(tp->dev, "Board does not support EEE!\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (edata->advertised != tp->eee.advertised) {
+               netdev_warn(tp->dev,
+                           "Direct manipulation of EEE advertisement is not supported\n");
+               return -EINVAL;
+       }
+
+       if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
+               netdev_warn(tp->dev,
+                           "Maximal Tx Lpi timer supported is %#x(u)\n",
+                           TG3_CPMU_DBTMR1_LNKIDLE_MAX);
+               return -EINVAL;
+       }
+
+       tp->eee = *edata;
+
+       tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+       tg3_warn_mgmt_link_flap(tp);
+
+       if (netif_running(tp->dev)) {
+               tg3_full_lock(tp, 0);
+               tg3_setup_eee(tp);
+               tg3_phy_reset(tp);
+               tg3_full_unlock(tp);
+       }
+
+       return 0;
+}
+
+static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+               netdev_warn(tp->dev,
+                           "Board does not support EEE!\n");
+               return -EOPNOTSUPP;
+       }
+
+       *edata = tp->eee;
+       return 0;
+}
+
 static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
@@ -13598,6 +13781,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .get_channels           = tg3_get_channels,
        .set_channels           = tg3_set_channels,
        .get_ts_info            = tg3_get_ts_info,
+       .get_eee                = tg3_get_eee,
+       .set_eee                = tg3_set_eee,
 };
 
 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
@@ -14946,9 +15131,18 @@ static int tg3_phy_probe(struct tg3 *tp)
             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
             (tg3_asic_rev(tp) == ASIC_REV_57765 &&
-             tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
+             tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 
+               tp->eee.supported = SUPPORTED_100baseT_Full |
+                                   SUPPORTED_1000baseT_Full;
+               tp->eee.advertised = ADVERTISED_100baseT_Full |
+                                    ADVERTISED_1000baseT_Full;
+               tp->eee.eee_enabled = 1;
+               tp->eee.tx_lpi_enabled = 1;
+               tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
+       }
+
        tg3_phy_init_link_config(tp);
 
        if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
@@ -17020,7 +17214,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 {
        struct net_device *dev;
        struct tg3 *tp;
-       int i, err, pm_cap;
+       int i, err;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
@@ -17042,25 +17236,10 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       /* Find power-management capability. */
-       pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-       if (pm_cap == 0) {
-               dev_err(&pdev->dev,
-                       "Cannot find Power Management capability, aborting\n");
-               err = -EIO;
-               goto err_out_free_res;
-       }
-
-       err = pci_set_power_state(pdev, PCI_D0);
-       if (err) {
-               dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
-               goto err_out_free_res;
-       }
-
        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
-               goto err_out_power_down;
+               goto err_out_free_res;
        }
 
        SET_NETDEV_DEV(dev, &pdev->dev);
@@ -17068,7 +17247,7 @@ static int tg3_init_one(struct pci_dev *pdev,
        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
-       tp->pm_cap = pm_cap;
+       tp->pm_cap = pdev->pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->irq_sync = 1;
@@ -17406,9 +17585,6 @@ err_out_iounmap:
 err_out_free_dev:
        free_netdev(dev);
 
-err_out_power_down:
-       pci_set_power_state(pdev, PCI_D3hot);
-
 err_out_free_res:
        pci_release_regions(pdev);
 
@@ -17518,6 +17694,8 @@ static int tg3_resume(struct device *device)
 
        tg3_full_lock(tp, 0);
 
+       tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
@@ -17579,10 +17757,13 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
        tg3_full_unlock(tp);
 
 done:
-       if (state == pci_channel_io_perm_failure)
+       if (state == pci_channel_io_perm_failure) {
+               tg3_napi_enable(tp);
+               dev_close(netdev);
                err = PCI_ERS_RESULT_DISCONNECT;
-       else
+       } else {
                pci_disable_device(pdev);
+       }
 
        rtnl_unlock();
 
@@ -17628,6 +17809,10 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
        rc = PCI_ERS_RESULT_RECOVERED;
 
 done:
+       if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+               tg3_napi_enable(tp);
+               dev_close(netdev);
+       }
        rtnl_unlock();
 
        return rc;
@@ -17652,6 +17837,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
                goto done;
 
        tg3_full_lock(tp, 0);
+       tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
@@ -17689,15 +17875,4 @@ static struct pci_driver tg3_driver = {
        .driver.pm      = &tg3_pm_ops,
 };
 
-static int __init tg3_init(void)
-{
-       return pci_register_driver(&tg3_driver);
-}
-
-static void __exit tg3_cleanup(void)
-{
-       pci_unregister_driver(&tg3_driver);
-}
-
-module_init(tg3_init);
-module_exit(tg3_cleanup);
+module_pci_driver(tg3_driver);
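
The last hunk of the tg3.c diff replaces the open-coded tg3_init()/tg3_cleanup() registration functions with the module_pci_driver() helper. As a reference point, here is a minimal, self-contained sketch of what that helper buys a PCI driver; all names and the device ID are illustrative, not taken from the patch:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
        { PCI_DEVICE(0x14e4, 0x1600) },         /* illustrative vendor/device ID */
        { }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* Enable the device; a real probe would also map BARs, request IRQs, ... */
        return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
        .name           = "example",
        .id_table       = example_ids,
        .probe          = example_probe,
        .remove         = example_remove,
};

/* Expands to the module_init()/module_exit() pair removed above. */
module_pci_driver(example_driver);
MODULE_LICENSE("GPL");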
index 9b2d3ac2474adda8e25398460f33d0abca38b05a..cd63d1189aae9fdfbcceb5615d76b8d6d74704d2 100644 (file)
 #define TG3_CPMU_EEE_DBTMR1            0x000036b4
 #define  TG3_CPMU_DBTMR1_PCIEXIT_2047US         0x07ff0000
 #define  TG3_CPMU_DBTMR1_LNKIDLE_2047US         0x000007ff
+#define  TG3_CPMU_DBTMR1_LNKIDLE_MAX    0x0000ffff
 #define TG3_CPMU_EEE_DBTMR2            0x000036b8
 #define  TG3_CPMU_DBTMR2_APE_TX_2047US  0x07ff0000
 #define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US         0x000007ff
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL     0x00004910
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K   0x00030000
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K  0x000c0000
-#define TG3_LSO_RD_DMA_TX_LENGTH_WA     0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719        0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720        0x00200000
 /* 0x4914 --> 0x4be0 unused */
 
 #define TG3_NUM_RDMA_CHANNELS          4
@@ -3059,7 +3061,7 @@ enum TG3_FLAGS {
        TG3_FLAG_APE_HAS_NCSI,
        TG3_FLAG_TX_TSTAMP_EN,
        TG3_FLAG_4K_FIFO_LIMIT,
-       TG3_FLAG_5719_RDMA_BUG,
+       TG3_FLAG_5719_5720_RDMA_BUG,
        TG3_FLAG_RESET_TASK_PENDING,
        TG3_FLAG_PTP_CAPABLE,
        TG3_FLAG_5705_PLUS,
@@ -3371,6 +3373,7 @@ struct tg3 {
        unsigned int                    irq_cnt;
 
        struct ethtool_coalesce         coal;
+       struct ethtool_eee              eee;
 
        /* firmware info */
        const char                      *fw_needed;
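
The tg3.h hunk above adds a cached struct ethtool_eee to struct tg3; together with the new get_eee/set_eee handlers in the tg3.c diff it exposes EEE state through the standard ethtool interface. A minimal user-space sketch of querying that state via the SIOCETHTOOL ioctl follows; the interface name is an assumption and error handling is kept to a minimum:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface name */
        ifr.ifr_data = (void *)&eee;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GEEE");
                close(fd);
                return 1;
        }
        printf("EEE enabled: %u, tx LPI enabled: %u, tx LPI timer: %u\n",
               eee.eee_enabled, eee.tx_lpi_enabled, eee.tx_lpi_timer);
        close(fd);
        return 0;
}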
index e423f82da4906b2f1a8a3a4680c52e05fc9a9678..b7d8127c198f7ef92f6e9f74201041464da3f64b 100644 (file)
@@ -164,7 +164,8 @@ struct bfa_ioc_attr {
        u8                              port_mode;      /*!< enum bfa_mode */
        u8                              cap_bm;         /*!< capability */
        u8                              port_mode_cfg;  /*!< enum bfa_mode */
-       u8                              rsvd[4];        /*!< 64bit align */
+       u8                              def_fn;         /*!< 1 if default fn */
+       u8                              rsvd[3];        /*!< 64bit align */
 };
 
 /* Adapter capability mask definition */
index f2b73ffa91224b732459f2c3dd777449846c1331..6f3cac060f29fb2eed554c0b42cfec9325c7ba39 100644 (file)
@@ -2371,7 +2371,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
 
        ioc_attr->state = bfa_ioc_get_state(ioc);
-       ioc_attr->port_id = ioc->port_id;
+       ioc_attr->port_id = bfa_ioc_portid(ioc);
        ioc_attr->port_mode = ioc->port_mode;
 
        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
@@ -2381,8 +2381,9 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
 
        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
 
-       ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
-       ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+       ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
+       ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
+       ioc_attr->def_fn = bfa_ioc_is_default(ioc);
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
 }
 
index 63a85e555df8b3815c9ee8181428fffe627c0706..f04e0aab25b43a763019c1e8f8e61fa591ad1ca9 100644 (file)
@@ -222,6 +222,8 @@ struct bfa_ioc_hwif {
 #define bfa_ioc_bar0(__ioc)            ((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)          ((__ioc)->port_id)
 #define bfa_ioc_asic_gen(__ioc)                ((__ioc)->asic_gen)
+#define bfa_ioc_is_default(__ioc)      \
+       (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
                (((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)       \
index 25dae757e9c42661b094270786d5f141ab72c3b4..f1eafc409bbd131894146d775b5b586d5318de86 100644 (file)
@@ -455,6 +455,8 @@ void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
 void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
 void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
                               struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
+                              struct bfi_msgq_mhdr *msghdr);
 
 /* APIs for BNA */
 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
index db14f69d63bcd4b5268b0c9d76bff81739de41ed..3ca77fad4851ca077e1f598e28d46155887f22c2 100644 (file)
@@ -298,7 +298,6 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
        case BFI_ENET_I2H_RSS_ENABLE_RSP:
        case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
        case BFI_ENET_I2H_RX_DEFAULT_RSP:
-       case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
        case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
        case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
        case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
@@ -311,6 +310,12 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
                        bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
                break;
 
+       case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
+               bna_rx_from_rid(bna, msghdr->enet_id, rx);
+               if (rx)
+                       bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
+               break;
+
        case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
                bna_rx_from_rid(bna, msghdr->enet_id, rx);
                if (rx)
index ea6f4a036401aae98d9e02126119d343f05ede71..57cd1bff59f1cd056f5d7b68755612ef7e985426 100644 (file)
@@ -710,6 +710,21 @@ bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
        bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
 }
 
+void
+bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
+                       struct bfi_msgq_mhdr *msghdr)
+{
+       struct bfi_enet_rsp *rsp =
+               (struct bfi_enet_rsp *)msghdr;
+
+       if (rsp->error) {
+               /* Clear ucast from cache */
+               rxf->ucast_active_set = 0;
+       }
+
+       bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
+}
+
 void
 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
                        struct bfi_msgq_mhdr *msghdr)
index ce4a030d3d0cab9705e8715b6169a9031b0fd840..b78e69e0e52a291047e72c222530a1500e4ac003 100644 (file)
@@ -2624,6 +2624,9 @@ bnad_stop(struct net_device *netdev)
        bnad_destroy_tx(bnad, 0);
        bnad_destroy_rx(bnad, 0);
 
+       /* These config flags are cleared in the hardware */
+       bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
+
        /* Synchronize mailbox IRQ */
        bnad_mbox_irq_sync(bnad);
 
@@ -3236,9 +3239,10 @@ bnad_init(struct bnad *bnad,
 
        sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
        bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
-
-       if (!bnad->work_q)
+       if (!bnad->work_q) {
+               iounmap(bnad->bar0);
                return -ENOMEM;
+       }
 
        return 0;
 }
index c1d0bc059bfd57b1d6ac744a7ba5c6ccdf1945ae..aefee77523f2fbc52c621883ebe8fd56a5ffbfce 100644 (file)
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
 #define BNAD_NAME                      "bna"
 #define BNAD_NAME_LEN                  64
 
-#define BNAD_VERSION                   "3.1.2.1"
+#define BNAD_VERSION                   "3.2.21.1"
 
 #define BNAD_MAILBOX_MSIX_INDEX                0
 #define BNAD_MAILBOX_MSIX_VECTORS      1
index 6e8bc9d88c418c5eff9838948303c16c71019cc1..94d957d203a6466b9b809d49962f6630ed2bc7cb 100644 (file)
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
                file->f_pos += offset;
                break;
        case 2:
-               file->f_pos = debug->buffer_len - offset;
+               file->f_pos = debug->buffer_len + offset;
                break;
        default:
                return -EINVAL;
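
The one-line bnad_debugfs change above corrects the SEEK_END case of the lseek handler: with whence == 2 the new position is the buffer length plus the caller's (normally non-positive) offset, not the length minus it. A small sketch of the conventional whence arithmetic for a fixed-size buffer, with illustrative names only:

#include <linux/fs.h>
#include <linux/errno.h>

static loff_t example_llseek(struct file *file, loff_t offset, int whence,
                             loff_t buffer_len)
{
        loff_t pos;

        switch (whence) {
        case 0:                 /* SEEK_SET: absolute position */
                pos = offset;
                break;
        case 1:                 /* SEEK_CUR: relative to the current position */
                pos = file->f_pos + offset;
                break;
        case 2:                 /* SEEK_END: relative to the end, offset usually <= 0 */
                pos = buffer_len + offset;
                break;
        default:
                return -EINVAL;
        }
        if (pos < 0 || pos > buffer_len)
                return -EINVAL;
        file->f_pos = pos;
        return pos;
}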
index 14ca9317c9150139f3047ab29fb7fba7835bdb61..c37f706d9992fadcfbd90016d8d23c7472452672 100644 (file)
@@ -37,8 +37,8 @@
 
 extern char bfa_version[];
 
-#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin"
-#define CNA_FW_FILE_CT2        "ct2fw-3.1.0.0.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin"
+#define CNA_FW_FILE_CT2        "ct2fw-3.2.1.0.bin"
 #define FC_SYMNAME_MAX 256     /*!< max name server symbolic name size */
 
 #pragma pack(1)
index 1194446f859a0018f2be94d945b03a64a568d861..8030cc0396fd22f1568fb0ded2eb3823b5bdd700 100644 (file)
@@ -22,8 +22,7 @@ if NET_CADENCE
 
 config ARM_AT91_ETHER
        tristate "AT91RM9200 Ethernet support"
-       depends on GENERIC_HARDIRQS
-       select NET_CORE
+       depends on GENERIC_HARDIRQS && HAS_DMA
        select MACB
        ---help---
          If you wish to compile a kernel for the AT91RM9200 and enable
@@ -31,6 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
        tristate "Cadence MACB/GEM support"
+       depends on HAS_DMA
        select PHYLIB
        ---help---
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
index cc9a185f0abbc03dfb9265a86a2d9fa13b07ba10..3f1957158a3bc876839d0613f1cbb0e7079687a5 100644 (file)
@@ -435,7 +435,6 @@ static int at91ether_remove(struct platform_device *pdev)
        unregister_netdev(dev);
        clk_disable(lp->pclk);
        free_netdev(dev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 6be513deb17f69b73be4b321821e99d3ec7a17cd..f7e21f278e0571fd76386be899e392a61965b0a1 100644 (file)
@@ -32,7 +32,8 @@
 
 #include "macb.h"
 
-#define RX_BUFFER_SIZE         128
+#define MACB_RX_BUFFER_SIZE    128
+#define RX_BUFFER_MULTIPLE     64  /* bytes */
 #define RX_RING_SIZE           512 /* must be power of 2 */
 #define RX_RING_BYTES          (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 
@@ -92,7 +93,7 @@ static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
 {
-       return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+       return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
 }
 
 void macb_set_hwaddr(struct macb *bp)
@@ -485,7 +486,8 @@ static void macb_tx_interrupt(struct macb *bp)
        status = macb_readl(bp, TSR);
        macb_writel(bp, TSR, status);
 
-       macb_writel(bp, ISR, MACB_BIT(TCOMP));
+       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+               macb_writel(bp, ISR, MACB_BIT(TCOMP));
 
        netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
                (unsigned long)status);
@@ -527,6 +529,155 @@ static void macb_tx_interrupt(struct macb *bp)
                netif_wake_queue(bp->dev);
 }
 
+static void gem_rx_refill(struct macb *bp)
+{
+       unsigned int            entry;
+       struct sk_buff          *skb;
+       struct macb_dma_desc    *desc;
+       dma_addr_t              paddr;
+
+       while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+               u32 addr, ctrl;
+
+               entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+               desc = &bp->rx_ring[entry];
+
+               /* Make hw descriptor updates visible to CPU */
+               rmb();
+
+               addr = desc->addr;
+               ctrl = desc->ctrl;
+               bp->rx_prepared_head++;
+
+               if ((addr & MACB_BIT(RX_USED)))
+                       continue;
+
+               if (bp->rx_skbuff[entry] == NULL) {
+                       /* allocate sk_buff for this free entry in ring */
+                       skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
+                       if (unlikely(skb == NULL)) {
+                               netdev_err(bp->dev,
+                                          "Unable to allocate sk_buff\n");
+                               break;
+                       }
+                       bp->rx_skbuff[entry] = skb;
+
+                       /* now fill corresponding descriptor entry */
+                       paddr = dma_map_single(&bp->pdev->dev, skb->data,
+                                              bp->rx_buffer_size, DMA_FROM_DEVICE);
+
+                       if (entry == RX_RING_SIZE - 1)
+                               paddr |= MACB_BIT(RX_WRAP);
+                       bp->rx_ring[entry].addr = paddr;
+                       bp->rx_ring[entry].ctrl = 0;
+
+                       /* properly align Ethernet header */
+                       skb_reserve(skb, NET_IP_ALIGN);
+               }
+       }
+
+       /* Make descriptor updates visible to hardware */
+       wmb();
+
+       netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
+                  bp->rx_prepared_head, bp->rx_tail);
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+                                 unsigned int end)
+{
+       unsigned int frag;
+
+       for (frag = begin; frag != end; frag++) {
+               struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+               desc->addr &= ~MACB_BIT(RX_USED);
+       }
+
+       /* Make descriptor updates visible to hardware */
+       wmb();
+
+       /*
+        * When this happens, the hardware stats registers for
+        * whatever caused this is updated, so we don't have to record
+        * anything.
+        */
+}
+
+static int gem_rx(struct macb *bp, int budget)
+{
+       unsigned int            len;
+       unsigned int            entry;
+       struct sk_buff          *skb;
+       struct macb_dma_desc    *desc;
+       int                     count = 0;
+
+       while (count < budget) {
+               u32 addr, ctrl;
+
+               entry = macb_rx_ring_wrap(bp->rx_tail);
+               desc = &bp->rx_ring[entry];
+
+               /* Make hw descriptor updates visible to CPU */
+               rmb();
+
+               addr = desc->addr;
+               ctrl = desc->ctrl;
+
+               if (!(addr & MACB_BIT(RX_USED)))
+                       break;
+
+               desc->addr &= ~MACB_BIT(RX_USED);
+               bp->rx_tail++;
+               count++;
+
+               if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+                       netdev_err(bp->dev,
+                                  "not whole frame pointed by descriptor\n");
+                       bp->stats.rx_dropped++;
+                       break;
+               }
+               skb = bp->rx_skbuff[entry];
+               if (unlikely(!skb)) {
+                       netdev_err(bp->dev,
+                                  "inconsistent Rx descriptor chain\n");
+                       bp->stats.rx_dropped++;
+                       break;
+               }
+               /* now everything is ready for receiving packet */
+               bp->rx_skbuff[entry] = NULL;
+               len = MACB_BFEXT(RX_FRMLEN, ctrl);
+
+               netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+
+               skb_put(skb, len);
+               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
+               dma_unmap_single(&bp->pdev->dev, addr,
+                                len, DMA_FROM_DEVICE);
+
+               skb->protocol = eth_type_trans(skb, bp->dev);
+               skb_checksum_none_assert(skb);
+
+               bp->stats.rx_packets++;
+               bp->stats.rx_bytes += skb->len;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+               netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+                           skb->len, skb->csum);
+               print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+                              skb->mac_header, 16, true);
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+                              skb->data, 32, true);
+#endif
+
+               netif_receive_skb(skb);
+       }
+
+       gem_rx_refill(bp);
+
+       return count;
+}
+
 static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                         unsigned int last_frag)
 {
@@ -574,7 +725,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
        skb_put(skb, len);
 
        for (frag = first_frag; ; frag++) {
-               unsigned int frag_len = RX_BUFFER_SIZE;
+               unsigned int frag_len = bp->rx_buffer_size;
 
                if (offset + frag_len > len) {
                        BUG_ON(frag != last_frag);
@@ -582,7 +733,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                }
                skb_copy_to_linear_data_offset(skb, offset,
                                macb_rx_buffer(bp, frag), frag_len);
-               offset += RX_BUFFER_SIZE;
+               offset += bp->rx_buffer_size;
                desc = macb_rx_desc(bp, frag);
                desc->addr &= ~MACB_BIT(RX_USED);
 
@@ -605,27 +756,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
        return 0;
 }
 
-/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
-                                 unsigned int end)
-{
-       unsigned int frag;
-
-       for (frag = begin; frag != end; frag++) {
-               struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
-               desc->addr &= ~MACB_BIT(RX_USED);
-       }
-
-       /* Make descriptor updates visible to hardware */
-       wmb();
-
-       /*
-        * When this happens, the hardware stats registers for
-        * whatever caused this is updated, so we don't have to record
-        * anything.
-        */
-}
-
 static int macb_rx(struct macb *bp, int budget)
 {
        int received = 0;
@@ -686,7 +816,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
        netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
                   (unsigned long)status, budget);
 
-       work_done = macb_rx(bp, budget);
+       work_done = bp->macbgem_ops.mog_rx(bp, budget);
        if (work_done < budget) {
                napi_complete(napi);
 
@@ -738,7 +868,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                         * now.
                         */
                        macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
-                       macb_writel(bp, ISR, MACB_BIT(RCOMP));
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(RCOMP));
 
                        if (napi_schedule_prep(&bp->napi)) {
                                netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -868,12 +999,71 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
+{
+       if (!macb_is_gem(bp)) {
+               bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
+       } else {
+               bp->rx_buffer_size = size;
+
+               if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
+                       netdev_dbg(bp->dev,
+                                   "RX buffer must be multiple of %d bytes, expanding\n",
+                                   RX_BUFFER_MULTIPLE);
+                       bp->rx_buffer_size =
+                               roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
+               }
+       }
+
+       netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
+                  bp->dev->mtu, bp->rx_buffer_size);
+}
+
+static void gem_free_rx_buffers(struct macb *bp)
+{
+       struct sk_buff          *skb;
+       struct macb_dma_desc    *desc;
+       dma_addr_t              addr;
+       int i;
+
+       if (!bp->rx_skbuff)
+               return;
+
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               skb = bp->rx_skbuff[i];
+
+               if (skb == NULL)
+                       continue;
+
+               desc = &bp->rx_ring[i];
+               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+               dma_unmap_single(&bp->pdev->dev, addr, skb->len,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+               skb = NULL;
+       }
+
+       kfree(bp->rx_skbuff);
+       bp->rx_skbuff = NULL;
+}
+
+static void macb_free_rx_buffers(struct macb *bp)
+{
+       if (bp->rx_buffers) {
+               dma_free_coherent(&bp->pdev->dev,
+                                 RX_RING_SIZE * bp->rx_buffer_size,
+                                 bp->rx_buffers, bp->rx_buffers_dma);
+               bp->rx_buffers = NULL;
+       }
+}
+
 static void macb_free_consistent(struct macb *bp)
 {
        if (bp->tx_skb) {
                kfree(bp->tx_skb);
                bp->tx_skb = NULL;
        }
+       bp->macbgem_ops.mog_free_rx_buffers(bp);
        if (bp->rx_ring) {
                dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
                                  bp->rx_ring, bp->rx_ring_dma);
@@ -884,12 +1074,37 @@ static void macb_free_consistent(struct macb *bp)
                                  bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
        }
-       if (bp->rx_buffers) {
-               dma_free_coherent(&bp->pdev->dev,
-                                 RX_RING_SIZE * RX_BUFFER_SIZE,
-                                 bp->rx_buffers, bp->rx_buffers_dma);
-               bp->rx_buffers = NULL;
-       }
+}
+
+static int gem_alloc_rx_buffers(struct macb *bp)
+{
+       int size;
+
+       size = RX_RING_SIZE * sizeof(struct sk_buff *);
+       bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
+       if (!bp->rx_skbuff)
+               return -ENOMEM;
+       else
+               netdev_dbg(bp->dev,
+                          "Allocated %d RX struct sk_buff entries at %p\n",
+                          RX_RING_SIZE, bp->rx_skbuff);
+       return 0;
+}
+
+static int macb_alloc_rx_buffers(struct macb *bp)
+{
+       int size;
+
+       size = RX_RING_SIZE * bp->rx_buffer_size;
+       bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+                                           &bp->rx_buffers_dma, GFP_KERNEL);
+       if (!bp->rx_buffers)
+               return -ENOMEM;
+       else
+               netdev_dbg(bp->dev,
+                          "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+                          size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+       return 0;
 }
 
 static int macb_alloc_consistent(struct macb *bp)
@@ -919,14 +1134,8 @@ static int macb_alloc_consistent(struct macb *bp)
                   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
                   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
 
-       size = RX_RING_SIZE * RX_BUFFER_SIZE;
-       bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
-                                           &bp->rx_buffers_dma, GFP_KERNEL);
-       if (!bp->rx_buffers)
+       if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
                goto out_err;
-       netdev_dbg(bp->dev,
-                  "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-                  size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
 
        return 0;
 
@@ -935,6 +1144,21 @@ out_err:
        return -ENOMEM;
 }
 
+static void gem_init_rings(struct macb *bp)
+{
+       int i;
+
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               bp->tx_ring[i].addr = 0;
+               bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+       }
+       bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+       bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+
+       gem_rx_refill(bp);
+}
+
 static void macb_init_rings(struct macb *bp)
 {
        int i;
@@ -944,7 +1168,7 @@ static void macb_init_rings(struct macb *bp)
        for (i = 0; i < RX_RING_SIZE; i++) {
                bp->rx_ring[i].addr = addr;
                bp->rx_ring[i].ctrl = 0;
-               addr += RX_BUFFER_SIZE;
+               addr += bp->rx_buffer_size;
        }
        bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
 
@@ -1054,7 +1278,7 @@ static void macb_configure_dma(struct macb *bp)
 
        if (macb_is_gem(bp)) {
                dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
-               dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+               dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
                dmacfg |= GEM_BF(FBLDO, 16);
                dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
                dmacfg &= ~GEM_BIT(ENDIA);
@@ -1062,6 +1286,17 @@ static void macb_configure_dma(struct macb *bp)
        }
 }
 
+/*
+ * Configure peripheral capacities according to integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+       if (macb_is_gem(bp)) {
+               if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+                       bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+       }
+}
+
 static void macb_init_hw(struct macb *bp)
 {
        u32 config;
@@ -1084,6 +1319,7 @@ static void macb_init_hw(struct macb *bp)
        bp->duplex = DUPLEX_HALF;
 
        macb_configure_dma(bp);
+       macb_configure_caps(bp);
 
        /* Initialize TX and RX buffers */
        macb_writel(bp, RBQP, bp->rx_ring_dma);
@@ -1219,6 +1455,7 @@ EXPORT_SYMBOL_GPL(macb_set_rx_mode);
 static int macb_open(struct net_device *dev)
 {
        struct macb *bp = netdev_priv(dev);
+       size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
        int err;
 
        netdev_dbg(bp->dev, "open\n");
@@ -1230,6 +1467,9 @@ static int macb_open(struct net_device *dev)
        if (!bp->phy_dev)
                return -EAGAIN;
 
+       /* RX buffers initialization */
+       macb_init_rx_buffer_size(bp, bufsz);
+
        err = macb_alloc_consistent(bp);
        if (err) {
                netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
@@ -1239,7 +1479,7 @@ static int macb_open(struct net_device *dev)
 
        napi_enable(&bp->napi);
 
-       macb_init_rings(bp);
+       bp->macbgem_ops.mog_init_rings(bp);
        macb_init_hw(bp);
 
        /* schedule a link state check */
@@ -1558,6 +1798,19 @@ static int __init macb_probe(struct platform_device *pdev)
 
        dev->base_addr = regs->start;
 
+       /* setup appropriated routines according to adapter type */
+       if (macb_is_gem(bp)) {
+               bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
+               bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
+               bp->macbgem_ops.mog_init_rings = gem_init_rings;
+               bp->macbgem_ops.mog_rx = gem_rx;
+       } else {
+               bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
+               bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
+               bp->macbgem_ops.mog_init_rings = macb_init_rings;
+               bp->macbgem_ops.mog_rx = macb_rx;
+       }
+
        /* Set MII management clock divider */
        config = macb_mdc_clk_div(bp);
        config |= macb_dbw(bp);
@@ -1635,7 +1888,6 @@ err_out_put_pclk:
 err_out_free_dev:
        free_netdev(dev);
 err_out:
-       platform_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -1661,7 +1913,6 @@ static int __exit macb_remove(struct platform_device *pdev)
                clk_disable_unprepare(bp->pclk);
                clk_put(bp->pclk);
                free_netdev(dev);
-               platform_set_drvdata(pdev, NULL);
        }
 
        return 0;
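
As a worked example of the new macb_init_rx_buffer_size() path above: GEM hardware requires the receive buffer to be a multiple of RX_BUFFER_MULTIPLE (64 bytes), so for a default 1500-byte MTU the requested size is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + NET_IP_ALIGN (2 on most architectures) = 1520 bytes, which is rounded up to 1536; the older MACB cores keep the fixed 128-byte fragment size. A standalone sketch of that rounding (an illustration, not the driver code):

#include <stdio.h>

#define RX_BUFFER_MULTIPLE 64   /* bytes, as in the patch */

/* Compute the GEM RX buffer size for a given MTU, rounded up to 64 bytes. */
static unsigned int gem_rx_buffer_size(unsigned int mtu)
{
        unsigned int size = mtu + 14 /* ETH_HLEN */ + 4 /* ETH_FCS_LEN */
                            + 2 /* NET_IP_ALIGN */;

        return (size + RX_BUFFER_MULTIPLE - 1) & ~(RX_BUFFER_MULTIPLE - 1u);
}

int main(void)
{
        printf("%u\n", gem_rx_buffer_size(1500));       /* prints 1536 */
        return 0;
}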
index 993d703806885d9f3ebedd4f5c035bfdd03306d8..f4076155bed7aa07eb2b04f133a3cab86f75cb23 100644 (file)
 #define MACB_REV_SIZE                          16
 
 /* Bitfields in DCFG1. */
+#define GEM_IRQCOR_OFFSET                      23
+#define GEM_IRQCOR_SIZE                                1
 #define GEM_DBWDEF_OFFSET                      25
 #define GEM_DBWDEF_SIZE                                3
 
 #define MACB_MAN_READ                          2
 #define MACB_MAN_CODE                          2
 
+/* Capability mask bits */
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x1
+
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
        (1 << MACB_##name##_OFFSET)
@@ -540,12 +545,24 @@ struct gem_stats {
        u32     rx_udp_checksum_errors;
 };
 
+struct macb;
+
+struct macb_or_gem_ops {
+       int     (*mog_alloc_rx_buffers)(struct macb *bp);
+       void    (*mog_free_rx_buffers)(struct macb *bp);
+       void    (*mog_init_rings)(struct macb *bp);
+       int     (*mog_rx)(struct macb *bp, int budget);
+};
+
 struct macb {
        void __iomem            *regs;
 
        unsigned int            rx_tail;
+       unsigned int            rx_prepared_head;
        struct macb_dma_desc    *rx_ring;
+       struct sk_buff          **rx_skbuff;
        void                    *rx_buffers;
+       size_t                  rx_buffer_size;
 
        unsigned int            tx_head, tx_tail;
        struct macb_dma_desc    *tx_ring;
@@ -568,12 +585,16 @@ struct macb {
        dma_addr_t              tx_ring_dma;
        dma_addr_t              rx_buffers_dma;
 
+       struct macb_or_gem_ops  macbgem_ops;
+
        struct mii_bus          *mii_bus;
        struct phy_device       *phy_dev;
        unsigned int            link;
        unsigned int            speed;
        unsigned int            duplex;
 
+       u32                     caps;
+
        phy_interface_t         phy_interface;
 
        /* AT91RM9200 transmit */
index aba435c3d4ae9fa315cbb3781447cb0b1ac1470e..184a063bed5fa59bbdc705e29355134a2d31d731 100644 (file)
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        select CRC32
        help
          This is the driver for the XGMAC Ethernet IP block found on Calxeda
index 4a1f2fa812abea3e32ce97880b0c1ea5f41287b8..7cb148c495c900ad4385ed3c59db7f63f389821b 100644 (file)
@@ -1790,7 +1790,6 @@ err_io:
        free_netdev(ndev);
 err_alloc:
        release_mem_region(res->start, resource_size(res));
-       platform_set_drvdata(pdev, NULL);
        return ret;
 }
 
@@ -1813,7 +1812,6 @@ static int xgmac_remove(struct platform_device *pdev)
        free_irq(ndev->irq, ndev);
        free_irq(priv->pmt_irq, ndev);
 
-       platform_set_drvdata(pdev, NULL);
        unregister_netdev(ndev);
        netif_napi_del(&priv->napi);
 
index 9624cfe7df57154a7377f34253e3d42801762d23..d7048db9863d905a57b23b9bc985a4c3d27f9bc9 100644 (file)
@@ -1351,22 +1351,11 @@ static void remove_one(struct pci_dev *pdev)
        t1_sw_reset(pdev);
 }
 
-static struct pci_driver driver = {
+static struct pci_driver cxgb_pci_driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = remove_one,
 };
 
-static int __init t1_init_module(void)
-{
-       return pci_register_driver(&driver);
-}
-
-static void __exit t1_cleanup_module(void)
-{
-       pci_unregister_driver(&driver);
-}
-
-module_init(t1_init_module);
-module_exit(t1_cleanup_module);
+module_pci_driver(cxgb_pci_driver);
index 0c96e5fe99cc263461d210745c78cafc1123d4a7..4058b856eb710779bf7f164ed1547749f6a26b94 100644 (file)
@@ -1246,6 +1246,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
        struct tid_range stid_range, tid_range;
        struct mtutab mtutab;
        unsigned int l2t_capacity;
+       struct l2t_data *l2td;
 
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
@@ -1261,8 +1262,8 @@ int cxgb3_offload_activate(struct adapter *adapter)
                goto out_free;
 
        err = -ENOMEM;
-       RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
-       if (!L2DATA(dev))
+       l2td = t3_init_l2t(l2t_capacity);
+       if (!l2td)
                goto out_free;
 
        natids = min(tid_range.num / 2, MAX_ATIDS);
@@ -1279,6 +1280,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
        INIT_LIST_HEAD(&t->list_node);
        t->dev = dev;
 
+       RCU_INIT_POINTER(dev->l2opt, l2td);
        T3C_DATA(dev) = t;
        dev->recv = process_rx;
        dev->neigh_update = t3_l2t_update;
@@ -1294,8 +1296,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
        return 0;
 
 out_free_l2t:
-       t3_free_l2t(L2DATA(dev));
-       RCU_INIT_POINTER(dev->l2opt, NULL);
+       t3_free_l2t(l2td);
 out_free:
        kfree(t);
        return err;
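
The cxgb3_offload change above stops publishing the freshly allocated L2 table through dev->l2opt until the surrounding offload state is fully initialized, so RCU readers can never observe a half-built structure. A minimal sketch of that publish-after-init pattern, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_table {
        int     nentries;
        int     *entries;
};

/* Read by lockless readers under rcu_read_lock()/rcu_dereference(). */
static struct example_table __rcu *example_tbl;

static int example_publish(int nentries)
{
        struct example_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;
        t->entries = kcalloc(nentries, sizeof(*t->entries), GFP_KERNEL);
        if (!t->entries) {
                kfree(t);
                return -ENOMEM;
        }
        t->nentries = nentries;

        /* Publish only after every field is set up; rcu_assign_pointer()
         * orders the initialization before the pointer becomes visible.
         * RCU_INIT_POINTER(), as used in the patch, is the lighter-weight
         * variant for cases where no reader can reach the pointer yet.
         */
        rcu_assign_pointer(example_tbl, t);
        return 0;
}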
index f12e6b85a653c3d345c775281a3b7b9ccacdf90f..687ec4a8bb48de001cfa9d39ef233ee7c69a5d96 100644 (file)
@@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
                q->pg_chunk.offset = 0;
                mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
                                       0, q->alloc_size, PCI_DMA_FROMDEVICE);
+               if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
+                       __free_pages(q->pg_chunk.page, order);
+                       q->pg_chunk.page = NULL;
+                       return -EIO;
+               }
                q->pg_chunk.mapping = mapping;
        }
        sd->pg_chunk = q->pg_chunk;
@@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
        return flits_to_desc(flits);
 }
 
+
+/*     map_skb - map a packet main body and its page fragments
+ *     @pdev: the PCI device
+ *     @skb: the packet
+ *     @addr: placeholder to save the mapped addresses
+ *
+ *     map the main body of an sk_buff and its page fragments, if any.
+ */
+static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
+                  dma_addr_t *addr)
+{
+       const skb_frag_t *fp, *end;
+       const struct skb_shared_info *si;
+
+       *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
+                              PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(pdev, *addr))
+               goto out_err;
+
+       si = skb_shinfo(skb);
+       end = &si->frags[si->nr_frags];
+
+       for (fp = si->frags; fp < end; fp++) {
+               *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
+                                          DMA_TO_DEVICE);
+               if (pci_dma_mapping_error(pdev, *addr))
+                       goto unwind;
+       }
+       return 0;
+
+unwind:
+       while (fp-- > si->frags)
+               dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
+                              DMA_TO_DEVICE);
+
+       pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
+out_err:
+       return -ENOMEM;
+}
+
 /**
- *     make_sgl - populate a scatter/gather list for a packet
+ *     write_sgl - populate a scatter/gather list for a packet
  *     @skb: the packet
  *     @sgp: the SGL to populate
  *     @start: start address of skb main body data to include in the SGL
  *     @len: length of skb main body data to include in the SGL
- *     @pdev: the PCI device
+ *     @addr: the list of the mapped addresses
  *
- *     Generates a scatter/gather list for the buffers that make up a packet
+ *     Copies the scatter/gather list for the buffers that make up a packet
  *     and returns the SGL size in 8-byte words.  The caller must size the SGL
  *     appropriately.
  */
-static inline unsigned int make_sgl(const struct sk_buff *skb,
+static inline unsigned int write_sgl(const struct sk_buff *skb,
                                    struct sg_ent *sgp, unsigned char *start,
-                                   unsigned int len, struct pci_dev *pdev)
+                                   unsigned int len, const dma_addr_t *addr)
 {
-       dma_addr_t mapping;
-       unsigned int i, j = 0, nfrags;
+       unsigned int i, j = 0, k = 0, nfrags;
 
        if (len) {
-               mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
                sgp->len[0] = cpu_to_be32(len);
-               sgp->addr[0] = cpu_to_be64(mapping);
-               j = 1;
+               sgp->addr[j++] = cpu_to_be64(addr[k++]);
        }
 
        nfrags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nfrags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
-                                          DMA_TO_DEVICE);
                sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-               sgp->addr[j] = cpu_to_be64(mapping);
+               sgp->addr[j] = cpu_to_be64(addr[k++]);
                j ^= 1;
                if (j == 0)
                        ++sgp;
@@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                            const struct port_info *pi,
                            unsigned int pidx, unsigned int gen,
                            struct sge_txq *q, unsigned int ndesc,
-                           unsigned int compl)
+                           unsigned int compl, const dma_addr_t *addr)
 {
        unsigned int flits, sgl_flits, cntrl, tso_info;
        struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
        }
 
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+       sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
 
        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
                         htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        struct netdev_queue *txq;
        struct sge_qset *qs;
        struct sge_txq *q;
+       dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
        /*
         * The chip min packet length is 9 octets but play safe and reject
@@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
+       if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
        q->in_use += ndesc;
        if (unlikely(credits - ndesc < q->stop_thres)) {
                t3_stop_tx_queue(txq, qs, q);
@@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (likely(!skb_shared(skb)))
                skb_orphan(skb);
 
-       write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+       write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
        check_ring_tx_db(adap, q);
        return NETDEV_TX_OK;
 }
@@ -1537,10 +1583,9 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
        dui = (struct deferred_unmap_info *)skb->head;
        p = dui->addr;
 
-       if (skb->tail - skb->transport_header)
-               pci_unmap_single(dui->pdev, *p++,
-                                skb->tail - skb->transport_header,
-                                PCI_DMA_TODEVICE);
+       if (skb_tail_pointer(skb) - skb_transport_header(skb))
+               pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
+                                skb_transport_header(skb), PCI_DMA_TODEVICE);
 
        si = skb_shinfo(skb);
        for (i = 0; i < si->nr_frags; i++)
@@ -1578,7 +1623,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
                          struct sge_txq *q, unsigned int pidx,
-                         unsigned int gen, unsigned int ndesc)
+                         unsigned int gen, unsigned int ndesc,
+                         const dma_addr_t *addr)
 {
        unsigned int sgl_flits, flits;
        struct work_request_hdr *from;
@@ -1599,9 +1645,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
        flits = skb_transport_offset(skb) / 8;
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
-                            skb->tail - skb->transport_header,
-                            adap->pdev);
+       sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
+                            skb_tail_pointer(skb) -
+                            skb_transport_header(skb), addr);
        if (need_skb_unmap()) {
                setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
                skb->destructor = deferred_unmap_destructor;
@@ -1627,7 +1673,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
 
        flits = skb_transport_offset(skb) / 8;  /* headers */
        cnt = skb_shinfo(skb)->nr_frags;
-       if (skb->tail != skb->transport_header)
+       if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits_to_desc(flits + sgl_len(cnt));
 }
@@ -1659,6 +1705,11 @@ again:   reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                goto again;
        }
 
+       if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
+               spin_unlock(&q->lock);
+               return NET_XMIT_SUCCESS;
+       }
+
        gen = q->gen;
        q->in_use += ndesc;
        pidx = q->pidx;
@@ -1669,7 +1720,7 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
        }
        spin_unlock(&q->lock);
 
-       write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+       write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
        check_ring_tx_db(adap, q);
        return NET_XMIT_SUCCESS;
 }
@@ -1687,6 +1738,7 @@ static void restart_offloadq(unsigned long data)
        struct sge_txq *q = &qs->txq[TXQ_OFLD];
        const struct port_info *pi = netdev_priv(qs->netdev);
        struct adapter *adap = pi->adapter;
+       unsigned int written = 0;
 
        spin_lock(&q->lock);
 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1706,10 +1758,14 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                        break;
                }
 
+               if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
+                       break;
+
                gen = q->gen;
                q->in_use += ndesc;
                pidx = q->pidx;
                q->pidx += ndesc;
+               written += ndesc;
                if (q->pidx >= q->size) {
                        q->pidx -= q->size;
                        q->gen ^= 1;
@@ -1717,7 +1773,8 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                __skb_unlink(skb, &q->sendq);
                spin_unlock(&q->lock);
 
-               write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+               write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
+                            (dma_addr_t *)skb->head);
                spin_lock(&q->lock);
        }
        spin_unlock(&q->lock);
@@ -1727,8 +1784,9 @@ again:    reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
        set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
        wmb();
-       t3_write_reg(adap, A_SG_KDOORBELL,
-                    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+       if (likely(written))
+               t3_write_reg(adap, A_SG_KDOORBELL,
+                            F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
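
[Editor's note] The sge.c changes above move DMA mapping (map_skb() filling a dma_addr_t array) ahead of any descriptor-ring bookkeeping, so a mapping failure simply drops the packet before q->in_use or the producer index is touched, and restart_offloadq() now rings the doorbell only if at least one descriptor was written. A standalone sketch of that shape; `map_frags`, `write_descriptors` and `ring_doorbell` are hypothetical stand-ins for the driver's helpers:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAGS 4

static bool map_frags(unsigned long addr[], int n)
{
	for (int i = 0; i < n; i++)
		addr[i] = 0x1000ul * (i + 1);   /* pretend mapping succeeds */
	return true;
}

static void write_descriptors(const unsigned long addr[], int n) { (void)addr; (void)n; }
static void ring_doorbell(void) { puts("doorbell"); }

static int xmit(int nfrags)
{
	unsigned long addr[MAX_FRAGS];
	unsigned int written = 0;

	/* 1. Map first: on failure, nothing on the ring has to be undone. */
	if (!map_frags(addr, nfrags))
		return -1;

	/* 2. Only now claim ring space and write descriptors. */
	write_descriptors(addr, nfrags);
	written += nfrags;

	/* 3. Notify hardware only if something was actually queued. */
	if (written)
		ring_doorbell();
	return 0;
}

int main(void)
{
	return xmit(3);
}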
index 681804b30a3fef09678b76b40b3cae54ebaa80d1..2aafb809e067b7a204a3fb1a92412756d9ceb78f 100644 (file)
@@ -51,7 +51,7 @@
 #include "t4_hw.h"
 
 #define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 1
+#define FW_VERSION_MINOR 4
 #define FW_VERSION_MICRO 0
 
 #define FW_VERSION_MAJOR_T5 0
index 3cd397d60434fe93b3005d2c329621a171bea24f..5a3256b083f23f7e69d8013c4ca1ac0585ec33c4 100644 (file)
@@ -4842,8 +4842,17 @@ static int adap_init0(struct adapter *adap)
         * is excessively mismatched relative to the driver.)
         */
        ret = t4_check_fw_version(adap);
+
+       /* The error code -EFAULT is returned by t4_check_fw_version() if
+        * the firmware on the adapter is older than the minimum supported
+        * version.  If the firmware on the adapter is too old (not supported
+        * by the driver) and we're the MASTER_PF, set the adapter state to
+        * DEV_STATE_UNINIT to force a firmware upgrade and reinitialization.
+        */
+       if ((adap->flags & MASTER_PF) && ret == -EFAULT)
+               state = DEV_STATE_UNINIT;
        if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
-               if (ret == -EINVAL || ret > 0) {
+               if (ret == -EINVAL || ret == -EFAULT || ret > 0) {
                        if (upgrade_fw(adap) >= 0) {
                                /*
                                 * Note that the chip was reset as part of the
@@ -4852,7 +4861,21 @@ static int adap_init0(struct adapter *adap)
                                 */
                                reset = 0;
                                ret = t4_check_fw_version(adap);
-                       }
+                       } else
+                               if (ret == -EFAULT) {
+                                       /*
+                                        * Firmware is old but still might
+                                        * work if we force reinitialization
+                                        * of the adapter. Ignoring FW upgrade
+                                        * failure.
+                                        */
+                                       dev_warn(adap->pdev_dev,
+                                                "Ignoring firmware upgrade "
+                                                "failure, and forcing driver "
+                                                "to reinitialize the "
+                                                "adapter.\n");
+                                       ret = 0;
+                               }
                }
                if (ret < 0)
                        return ret;
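
[Editor's note] The hunk above makes adap_init0() treat -EFAULT from t4_check_fw_version() (card firmware older than the driver's minimum) as a reason to force DEV_STATE_UNINIT and attempt a firmware upgrade; if that upgrade fails, the failure is ignored and the driver reinitializes anyway. A condensed userspace sketch of the decision; `handle_fw_check` and `try_upgrade_fw` are invented names, and the successful-upgrade path (reset and re-check) is omitted:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT };

/* Hypothetical stand-in for upgrade_fw(); returns < 0 on failure. */
static int try_upgrade_fw(void) { return -1; }

static int handle_fw_check(int ret, bool master_pf, enum dev_state *state)
{
	/* Too-old firmware: force full reinitialization on the master PF. */
	if (master_pf && ret == -EFAULT)
		*state = DEV_STATE_UNINIT;

	if (master_pf && *state != DEV_STATE_INIT &&
	    (ret == -EINVAL || ret == -EFAULT || ret > 0)) {
		if (try_upgrade_fw() < 0 && ret == -EFAULT) {
			/* Old but usable firmware: ignore the upgrade failure. */
			fprintf(stderr, "ignoring fw upgrade failure, reinitializing\n");
			ret = 0;
		}
	}
	return ret < 0 ? ret : 0;
}

int main(void)
{
	enum dev_state state = DEV_STATE_INIT;
	return handle_fw_check(-EFAULT, true, &state);
}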
index 2bfbb206b35af7c943f489ab5cbae9ebb44b8d7e..ac311f5f3eb9590d1d9b5fb3bb0a9c3f0fac5ce7 100644 (file)
@@ -1294,7 +1294,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 
        flits = skb_transport_offset(skb) / 8U;   /* headers */
        cnt = skb_shinfo(skb)->nr_frags;
-       if (skb->tail != skb->transport_header)
+       if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
 }
index d02d4e8c4417cf7d78a70586f6c532ef8355a76a..4cbb2f9850be554c9ec48afd7479b4fcd9e5ebc2 100644 (file)
@@ -938,6 +938,15 @@ int t4_check_fw_version(struct adapter *adapter)
        memcpy(adapter->params.api_vers, api_vers,
               sizeof(adapter->params.api_vers));
 
+       if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+           (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+               dev_err(adapter->pdev_dev,
+                       "Card has firmware version %u.%u.%u, minimum "
+                       "supported firmware is %u.%u.%u.\n", major, minor,
+                       micro, exp_major, exp_minor, exp_micro);
+               return -EFAULT;
+       }
+
        if (major != exp_major) {            /* major mismatch - fail */
                dev_err(adapter->pdev_dev,
                        "card FW has major version %u, driver wants %u\n",
@@ -3773,7 +3782,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
                p->lport = j;
                p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
-               adap->port[i]->dev_id = j;
 
                ret = ntohl(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
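
[Editor's note] t4_check_fw_version() now rejects any on-card firmware strictly older than the driver's minimum (major, minor, micro) with -EFAULT, before the existing major-mismatch check. A standalone sketch of the same tuple comparison; `struct fw_ver` and `check_min_fw` are invented names, and 1.4.0 / 1.1.0 below are simply the new and old FW_VERSION values from the cxgb4.h hunk, used as sample input:

#include <errno.h>
#include <stdio.h>

struct fw_ver { unsigned int major, minor, micro; };

/* Return -EFAULT if 'card' is strictly older than 'min', else 0. */
static int check_min_fw(struct fw_ver card, struct fw_ver min)
{
	if (card.major < min.major ||
	    (card.major == min.major && card.minor < min.minor) ||
	    (card.major == min.major && card.minor == min.minor &&
	     card.micro < min.micro)) {
		fprintf(stderr,
			"card has firmware %u.%u.%u, minimum supported is %u.%u.%u\n",
			card.major, card.minor, card.micro,
			min.major, min.minor, min.micro);
		return -EFAULT;
	}
	return 0;
}

int main(void)
{
	struct fw_ver min = { 1, 4, 0 };
	struct fw_ver card = { 1, 1, 0 };
	return check_min_fw(card, min) ? 1 : 0;
}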
index 8388e36cf08f97b17d75230b68e3683a3a2b365e..7403dff8f14a4cf38090f8b32c57645d91b1770e 100644 (file)
@@ -44,7 +44,6 @@ config CS89x0_PLATFORM
 config EP93XX_ETH
        tristate "EP93xx Ethernet support"
        depends on ARM && ARCH_EP93XX
-       select NET_CORE
        select MII
        help
          This is a driver for the ethernet hardware included in EP93xx CPUs.
index 67b0388b6e68223d2281bacb8f69aa330f2193fa..e3d4ec836f8bee3f31b41a7502966470edd90655 100644 (file)
@@ -783,7 +783,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
        dev = platform_get_drvdata(pdev);
        if (dev == NULL)
                return 0;
-       platform_set_drvdata(pdev, NULL);
 
        ep = netdev_priv(dev);
 
index 9745fe5e8039ccfee2346449934246c747ec5c70..316c5e5a92ad450372cd5eb8fa81b70a7df795a7 100644 (file)
@@ -6,7 +6,6 @@ config DM9000
        tristate "DM9000 support"
        depends on ARM || BLACKFIN || MIPS || COLDFIRE
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          Support for DM9000 chipset.
index 9105465b2a1a92c5561c99bbda0df7902182ccbb..a13b312b50f20dfd544d88a338bd4263a4a716af 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
 #include <linux/ethtool.h>
 #include <linux/dm9000.h>
 #include <linux/delay.h>
@@ -827,7 +829,7 @@ dm9000_hash_table_unlocked(struct net_device *dev)
        struct netdev_hw_addr *ha;
        int i, oft;
        u32 hash_val;
-       u16 hash_table[4];
+       u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
        u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
 
        dm9000_dbg(db, 1, "entering %s\n", __func__);
@@ -835,13 +837,6 @@ dm9000_hash_table_unlocked(struct net_device *dev)
        for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
                iow(db, oft, dev->dev_addr[i]);
 
-       /* Clear Hash Table */
-       for (i = 0; i < 4; i++)
-               hash_table[i] = 0x0;
-
-       /* broadcast address */
-       hash_table[3] = 0x8000;
-
        if (dev->flags & IFF_PROMISC)
                rcr |= RCR_PRMSC;
 
@@ -1358,6 +1353,31 @@ static const struct net_device_ops dm9000_netdev_ops = {
 #endif
 };
 
+static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
+{
+       struct dm9000_plat_data *pdata;
+       struct device_node *np = dev->of_node;
+       const void *mac_addr;
+
+       if (!IS_ENABLED(CONFIG_OF) || !np)
+               return NULL;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       if (of_find_property(np, "davicom,ext-phy", NULL))
+               pdata->flags |= DM9000_PLATF_EXT_PHY;
+       if (of_find_property(np, "davicom,no-eeprom", NULL))
+               pdata->flags |= DM9000_PLATF_NO_EEPROM;
+
+       mac_addr = of_get_mac_address(np);
+       if (mac_addr)
+               memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
+
+       return pdata;
+}
+
 /*
  * Search DM9000 board, allocate space and register it
  */
@@ -1373,6 +1393,12 @@ dm9000_probe(struct platform_device *pdev)
        int i;
        u32 id_val;
 
+       if (!pdata) {
+               pdata = dm9000_parse_dt(&pdev->dev);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+       }
+
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct board_info));
        if (!ndev)
@@ -1673,8 +1699,6 @@ dm9000_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        unregister_netdev(ndev);
        dm9000_release_board(pdev, netdev_priv(ndev));
        free_netdev(ndev);              /* free device structure */
@@ -1683,11 +1707,20 @@ dm9000_drv_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id dm9000_of_matches[] = {
+       { .compatible = "davicom,dm9000", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dm9000_of_matches);
+#endif
+
 static struct platform_driver dm9000_driver = {
        .driver = {
                .name    = "dm9000",
                .owner   = THIS_MODULE,
                .pm      = &dm9000_drv_pm_ops,
+               .of_match_table = of_match_ptr(dm9000_of_matches),
        },
        .probe   = dm9000_probe,
        .remove  = dm9000_drv_remove,
index 1df33c799c0012a9144811c90c39a063e92fb45e..eb9ba6e97d04a4001d3884a1045c364edbc0bab2 100644 (file)
@@ -126,7 +126,6 @@ config WINBOND_840
        tristate "Winbond W89c840 Ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver is for the Winbond W89c840 chip.  It also works with 
index 28a5e425fecf04fd6199ffb4d518f7e4210d1123..92306b320840289e710502dcee938282c4a92e5e 100644 (file)
@@ -76,6 +76,12 @@ int tulip_refill_rx(struct net_device *dev)
 
                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
+                       if (dma_mapping_error(&tp->pdev->dev, mapping)) {
+                               dev_kfree_skb(skb);
+                               tp->rx_buffers[entry].skb = NULL;
+                               break;
+                       }
+
                        tp->rx_buffers[entry].mapping = mapping;
 
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
index 1e9443d9fb5732fffa352683de3a019679c7f6b8..c94152f1c6be5396b511902a28478bae1c00e442 100644 (file)
@@ -1410,12 +1410,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return i;
        }
 
-       /* The chip will fail to enter a low-power state later unless
-        * first explicitly commanded into D0 */
-       if (pci_set_power_state(pdev, PCI_D0)) {
-               pr_notice("Failed to set power state to D0\n");
-       }
-
        irq = pdev->irq;
 
        /* alloc_etherdev ensures aligned and zeroed private structures */
index cdbcd16431411bf48ad3cf459c8cb12f3adbf537..9b84cb04fe5fccded9f26fe010cf16ea6bb36c8a 100644 (file)
@@ -1171,16 +1171,4 @@ investigate_write_descriptor(struct net_device *dev,
        }
 }
 
-static int __init xircom_init(void)
-{
-       return pci_register_driver(&xircom_ops);
-}
-
-static void __exit xircom_exit(void)
-{
-       pci_unregister_driver(&xircom_ops);
-}
-
-module_init(xircom_init)
-module_exit(xircom_exit)
-
+module_pci_driver(xircom_ops);
index ee26ce78e270cabfb7acff61745b597703bab13c..c543ac11ce08958a94ef8384b9211d10cd1dbfc2 100644 (file)
@@ -36,7 +36,6 @@ config SUNDANCE
        tristate "Sundance Alta support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver is for the Sundance "Alta" chip.
index f544b297c9abef7b7ee73fa3a5fd4eb2b9921f42..c827b1b6b1ceb81c5d10bdc2db6fcea3192701a3 100644 (file)
@@ -262,6 +262,7 @@ struct be_rx_compl_info {
        u8 ipv6;
        u8 vtm;
        u8 pkt_type;
+       u8 ip_frag;
 };
 
 struct be_rx_obj {
@@ -332,6 +333,9 @@ enum vf_state {
 #define BE_VF_UC_PMAC_COUNT            2
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD            (1 << 11)
 
+/* Ethtool set_dump flags */
+#define LANCER_INITIATE_FW_DUMP                        0x1
+
 struct phy_info {
        u8 transceiver;
        u8 autoneg;
@@ -397,6 +401,7 @@ struct be_adapter {
        u32 cmd_privileges;
        /* Ethtool knobs and info */
        char fw_ver[FW_VER_LEN];
+       char fw_on_flash[FW_VER_LEN];
        int if_handle;          /* Used to configure filtering */
        u32 *pmac_id;           /* MAC addr handle used by BE card */
        u32 beacon_state;       /* for set_phys_id */
index fd7b547698abd89f829008e717e34f2fc9d9e2f9..6e6e0a117ee2fe16061ae8e0b77a51aaacd3336b 100644 (file)
@@ -562,7 +562,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
 
        resource_error = lancer_provisioning_error(adapter);
        if (resource_error)
-               return -1;
+               return -EAGAIN;
 
        status = lancer_wait_ready(adapter);
        if (!status) {
@@ -590,8 +590,8 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
         * when PF provisions resources.
         */
        resource_error = lancer_provisioning_error(adapter);
-       if (status == -1 && !resource_error)
-               adapter->eeh_error = true;
+       if (resource_error)
+               status = -EAGAIN;
 
        return status;
 }
@@ -2976,22 +2976,17 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
        for (i = 0; i < desc_count; i++) {
                desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
                if (((void *)desc + desc->desc_len) >
-                   (void *)(buf + max_buf_size)) {
-                       desc = NULL;
-                       break;
-               }
+                   (void *)(buf + max_buf_size))
+                       return NULL;
 
                if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
                    desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
-                       break;
+                       return desc;
 
                desc = (void *)desc + desc->desc_len;
        }
 
-       if (!desc || i == MAX_RESOURCE_DESC)
-               return NULL;
-
-       return desc;
+       return NULL;
 }
 
 /* Uses Mbox */
@@ -3260,6 +3255,72 @@ err:
        return status;
 }
 
+static int lancer_wait_idle(struct be_adapter *adapter)
+{
+#define SLIPORT_IDLE_TIMEOUT 30
+       u32 reg_val;
+       int status = 0, i;
+
+       for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
+               reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
+               if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
+                       break;
+
+               ssleep(1);
+       }
+
+       if (i == SLIPORT_IDLE_TIMEOUT)
+               status = -1;
+
+       return status;
+}
+
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
+{
+       int status = 0;
+
+       status = lancer_wait_idle(adapter);
+       if (status)
+               return status;
+
+       iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
+
+       return status;
+}
+
+/* Routine to check whether dump image is present or not */
+bool dump_present(struct be_adapter *adapter)
+{
+       u32 sliport_status = 0;
+
+       sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+       return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
+}
+
+int lancer_initiate_dump(struct be_adapter *adapter)
+{
+       int status;
+
+       /* give firmware reset and diagnostic dump */
+       status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
+                                    PHYSDEV_CONTROL_DD_MASK);
+       if (status < 0) {
+               dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
+               return status;
+       }
+
+       status = lancer_wait_idle(adapter);
+       if (status)
+               return status;
+
+       if (!dump_present(adapter)) {
+               dev_err(&adapter->pdev->dev, "Dump image not present\n");
+               return -1;
+       }
+
+       return 0;
+}
+
 /* Uses sync mcc */
 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
 {
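
[Editor's note] lancer_physdev_ctrl() above waits up to SLIPORT_IDLE_TIMEOUT seconds for the PHYSDEV_CONTROL "in progress" bit to clear before writing the requested mask, and lancer_initiate_dump() chains a firmware-reset-plus-dump request, another wait, and a check of the dump-present status bit. A minimal userspace sketch of the bounded-poll idiom; the simulated register and the `reg_read`/`reg_write`/`wait_idle` names replace the real ioread32/iowrite32 access and are not kernel API:

#include <stdio.h>
#include <unistd.h>

#define IDLE_TIMEOUT_S 30
#define CTRL_INP_MASK  0x40000000u   /* "operation in progress" bit */

static unsigned int fake_ctrl_reg;   /* stands in for a memory-mapped register */

static unsigned int reg_read(void)    { return fake_ctrl_reg; }
static void reg_write(unsigned int v) { fake_ctrl_reg = v; }

/* Poll once per second until the in-progress bit clears, or give up. */
static int wait_idle(void)
{
	for (int i = 0; i < IDLE_TIMEOUT_S; i++) {
		if (!(reg_read() & CTRL_INP_MASK))
			return 0;
		sleep(1);
	}
	return -1;
}

static int physdev_ctrl(unsigned int mask)
{
	int status = wait_idle();

	if (status)
		return status;
	reg_write(mask);
	return 0;
}

int main(void)
{
	return physdev_ctrl(0x2 /* e.g. a reset bit */) ? 1 : 0;
}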
index 025bdb0d1764e29ed92948a920a4273cf1172a35..5228d88c5a024e8e20be18e4f9fec69de7865e42 100644 (file)
@@ -1937,6 +1937,9 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
                                          struct be_dma_mem *cmd,
                                          struct be_fat_conf_params *cfgs);
 extern int lancer_wait_ready(struct be_adapter *adapter);
+extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+extern int lancer_initiate_dump(struct be_adapter *adapter);
+extern bool dump_present(struct be_adapter *adapter);
 extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
 extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
 extern int be_cmd_get_func_config(struct be_adapter *adapter);
index 3d4461adb3b4194eef3fe2dca339650665dac97b..4f8c941217cc017495736129373fe5d2ba40db60 100644 (file)
@@ -177,19 +177,15 @@ static void be_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *drvinfo)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       char fw_on_flash[FW_VER_LEN];
-
-       memset(fw_on_flash, 0 , sizeof(fw_on_flash));
-       be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
 
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
-       if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN))
+       if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
                strlcpy(drvinfo->fw_version, adapter->fw_ver,
                        sizeof(drvinfo->fw_version));
        else
                snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-                        "%s [%s]", adapter->fw_ver, fw_on_flash);
+                        "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
@@ -673,6 +669,34 @@ be_set_phys_id(struct net_device *netdev,
        return 0;
 }
 
+static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       if (!lancer_chip(adapter)) {
+               dev_err(dev, "FW dump not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (dump_present(adapter)) {
+               dev_err(dev, "Previous dump not cleared, not forcing dump\n");
+               return 0;
+       }
+
+       switch (dump->flag) {
+       case LANCER_INITIATE_FW_DUMP:
+               status = lancer_initiate_dump(adapter);
+               if (!status)
+                       dev_info(dev, "F/w dump initiated successfully\n");
+               break;
+       default:
+               dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
+               return -EINVAL;
+       }
+       return status;
+}
 
 static void
 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1110,6 +1134,7 @@ const struct ethtool_ops be_ethtool_ops = {
        .set_pauseparam = be_set_pauseparam,
        .get_strings = be_get_stat_strings,
        .set_phys_id = be_set_phys_id,
+       .set_dump = be_set_dump,
        .get_msglevel = be_get_msg_level,
        .set_msglevel = be_set_msg_level,
        .get_sset_count = be_get_sset_count,
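
[Editor's note] The new be_set_dump() hook is reached through ethtool's standard set-dump interface; with flag LANCER_INITIATE_FW_DUMP (0x1) it asks the Lancer firmware to reset and leave a diagnostic dump behind. From userspace this should be reachable through ethtool's --set-dump (-W) option or an SIOCETHTOOL ioctl; a sketch of the latter follows (the default interface name, required privileges and error handling are the reader's concern):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";  /* assumed name */
	struct ethtool_dump dump = {
		.cmd  = ETHTOOL_SET_DUMP,
		.flag = 0x1,               /* LANCER_INITIATE_FW_DUMP */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&dump;

	if (ioctl(fd, SIOCETHTOOL, &ifr)) {
		perror("ETHTOOL_SET_DUMP");
		close(fd);
		return 1;
	}
	close(fd);
	puts("firmware dump requested");
	return 0;
}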
index 3c1099b47f2a7013da331c9e5b00dca0f0b80946..3e2162121601e79481428c4d7ca02c3523dc0e33 100644 (file)
 #define PHYSDEV_CONTROL_OFFSET         0x414
 
 #define SLIPORT_STATUS_ERR_MASK                0x80000000
+#define SLIPORT_STATUS_DIP_MASK                0x02000000
 #define SLIPORT_STATUS_RN_MASK         0x01000000
 #define SLIPORT_STATUS_RDY_MASK                0x00800000
 #define SLI_PORT_CONTROL_IP_MASK       0x08000000
 #define PHYSDEV_CONTROL_FW_RESET_MASK  0x00000002
+#define PHYSDEV_CONTROL_DD_MASK                0x00000004
 #define PHYSDEV_CONTROL_INP_MASK       0x40000000
 
 #define SLIPORT_ERROR_NO_RESOURCE1     0x2
@@ -356,7 +358,7 @@ struct amap_eth_rx_compl_v0 {
        u8 ip_version;          /* dword 1 */
        u8 macdst[6];           /* dword 1 */
        u8 vtp;                 /* dword 1 */
-       u8 rsvd0;               /* dword 1 */
+       u8 ip_frag;             /* dword 1 */
        u8 fragndx[10];         /* dword 1 */
        u8 ct[2];               /* dword 1 */
        u8 sw;                  /* dword 1 */
index a444110b060fd74361759be2f9f3a25a4f0fe095..cd69ac79f565bb4490c55507d0696d24ba84c106 100644 (file)
@@ -780,26 +780,18 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
        if (unlikely(!skb))
                return skb;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-               skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-               if (skb)
-                       skb->vlan_tci = 0;
-       }
-
-       if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
-               if (!vlan_tag)
-                       vlan_tag = adapter->pvid;
-               if (skip_hw_vlan)
-                       *skip_hw_vlan = true;
-       }
+       else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
+               vlan_tag = adapter->pvid;
 
        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
-
                skb->vlan_tci = 0;
+               if (skip_hw_vlan)
+                       *skip_hw_vlan = true;
        }
 
        /* Insert the outer VLAN, if any */
@@ -842,32 +834,39 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 }
 
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
+                               struct sk_buff *skb)
 {
-       return BE3_chip(adapter) &&
-               be_ipv6_exthdr_check(skb);
+       return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
 }
 
-static netdev_tx_t be_xmit(struct sk_buff *skb,
-                       struct net_device *netdev)
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+                                          struct sk_buff *skb,
+                                          bool *skip_hw_vlan)
 {
-       struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
-       struct be_queue_info *txq = &txo->q;
-       struct iphdr *ip = NULL;
-       u32 wrb_cnt = 0, copied = 0;
-       u32 start = txq->head, eth_hdr_len;
-       bool dummy_wrb, stopped = false;
-       bool skip_hw_vlan = false;
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+       unsigned int eth_hdr_len;
+       struct iphdr *ip;
 
-       eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
-               VLAN_ETH_HLEN : ETH_HLEN;
+       /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
+        * may cause a transmit stall on that port. So the work-around is to
+        * pad such packets to a 36-byte length.
+        */
+       if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
+               if (skb_padto(skb, 36))
+                       goto tx_drop;
+               skb->len = 36;
+       }
 
        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
+        * For padded packets, Lancer computes incorrect checksum.
         */
-       if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
+       eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+                                               VLAN_ETH_HLEN : ETH_HLEN;
+       if (skb->len <= 60 &&
+           (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
+           is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }
@@ -877,15 +876,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
-                       skip_hw_vlan = true;
+                       *skip_hw_vlan = true;
 
        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
-                       vlan_tx_tag_present(skb)) {
-               skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
+           vlan_tx_tag_present(skb)) {
+               skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }
@@ -895,8 +894,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
-                    (adapter->pvid || adapter->qnq_vid) &&
-                    !qnq_async_evt_rcvd(adapter)))
+           (adapter->pvid || adapter->qnq_vid) &&
+           !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;
 
        /* Manual VLAN tag insertion to prevent:
@@ -907,11 +906,31 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
-               skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
+               skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }
 
+       return skb;
+tx_drop:
+       dev_kfree_skb_any(skb);
+       return NULL;
+}
+
+static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+       struct be_queue_info *txq = &txo->q;
+       bool dummy_wrb, stopped = false;
+       u32 wrb_cnt = 0, copied = 0;
+       bool skip_hw_vlan = false;
+       u32 start = txq->head;
+
+       skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
+       if (!skb)
+               return NETDEV_TX_OK;
+
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
@@ -941,7 +960,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
-tx_drop:
        return NETDEV_TX_OK;
 }
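
[Editor's note] Two of the workarounds folded into be_xmit_workarounds() above are pure length fix-ups: frames of 32 bytes or less are padded to 36 bytes to avoid a Lancer transmit stall, and short padded frames (60 bytes or less) are trimmed back to eth_hdr_len + ntohs(ip->tot_len) because the hardware mangles the IP total length / checksum when it tags a padded packet. A small sketch of the arithmetic only (no real skb handling; `lancer_padded_len` and `trimmed_len` are invented helpers):

#include <assert.h>

#define ETH_HLEN      14
#define VLAN_ETH_HLEN 18

/* Pad-to-36 workaround for very short frames. */
static unsigned int lancer_padded_len(unsigned int frame_len)
{
	return frame_len <= 32 ? 36 : frame_len;
}

/* Trim a short padded frame back to the length the IP header claims. */
static unsigned int trimmed_len(unsigned int eth_hdr_len,
				unsigned int ip_tot_len)
{
	return eth_hdr_len + ip_tot_len;
}

int main(void)
{
	/* A 28-byte IP datagram padded out to the 60-byte Ethernet minimum
	 * is trimmed back to 14 + 28 = 42 bytes before the HW adds a tag. */
	assert(lancer_padded_len(20) == 36);
	assert(trimmed_len(ETH_HLEN, 28) == 42);
	return 0;
}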
 
@@ -1244,30 +1262,6 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        return status;
 }
 
-static int be_find_vfs(struct be_adapter *adapter, int vf_state)
-{
-       struct pci_dev *dev, *pdev = adapter->pdev;
-       int vfs = 0, assigned_vfs = 0, pos;
-       u16 offset, stride;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
-       if (!pos)
-               return 0;
-       pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
-       pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
-
-       dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
-       while (dev) {
-               if (dev->is_virtfn && pci_physfn(dev) == pdev) {
-                       vfs++;
-                       if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
-                               assigned_vfs++;
-               }
-               dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
-       }
-       return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
-}
-
 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
 {
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
@@ -1607,6 +1601,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
+       rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+                                     ip_frag, compl);
 }
 
 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1628,6 +1624,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
        else
                be_parse_rx_compl_v0(compl, rxcp);
 
+       if (rxcp->ip_frag)
+               rxcp->l4_csum = 0;
+
        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
@@ -2176,7 +2175,7 @@ static irqreturn_t be_msix(int irq, void *dev)
 
 static inline bool do_gro(struct be_rx_compl_info *rxcp)
 {
-       return (rxcp->tcpf && !rxcp->err) ? true : false;
+       return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
@@ -2774,7 +2773,7 @@ static void be_vf_clear(struct be_adapter *adapter)
        struct be_vf_cfg *vf_cfg;
        u32 vf;
 
-       if (be_find_vfs(adapter, ASSIGNED)) {
+       if (pci_vfs_assigned(adapter->pdev)) {
                dev_warn(&adapter->pdev->dev,
                         "VFs are assigned to VMs: not disabling VFs\n");
                goto done;
@@ -2876,7 +2875,7 @@ static int be_vf_setup(struct be_adapter *adapter)
        int status, old_vfs, vf;
        struct device *dev = &adapter->pdev->dev;
 
-       old_vfs = be_find_vfs(adapter, ENABLED);
+       old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
                dev_info(dev, "%d VFs are already enabled\n", old_vfs);
                if (old_vfs != num_vfs)
@@ -3187,7 +3186,7 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
+       be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
 
        if (adapter->vlans_added)
                be_vid_config(adapter);
@@ -3533,40 +3532,6 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
        return 0;
 }
 
-static int lancer_wait_idle(struct be_adapter *adapter)
-{
-#define SLIPORT_IDLE_TIMEOUT 30
-       u32 reg_val;
-       int status = 0, i;
-
-       for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
-               reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
-               if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
-                       break;
-
-               ssleep(1);
-       }
-
-       if (i == SLIPORT_IDLE_TIMEOUT)
-               status = -1;
-
-       return status;
-}
-
-static int lancer_fw_reset(struct be_adapter *adapter)
-{
-       int status = 0;
-
-       status = lancer_wait_idle(adapter);
-       if (status)
-               return status;
-
-       iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
-                 PHYSDEV_CONTROL_OFFSET);
-
-       return status;
-}
-
 static int lancer_fw_download(struct be_adapter *adapter,
                                const struct firmware *fw)
 {
@@ -3644,7 +3609,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
        }
 
        if (change_status == LANCER_FW_RESET_NEEDED) {
-               status = lancer_fw_reset(adapter);
+               status = lancer_physdev_ctrl(adapter,
+                                            PHYSDEV_CONTROL_FW_RESET_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Adapter busy for FW reset.\n"
@@ -3779,6 +3745,10 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
        else
                status = be_fw_download(adapter, fw);
 
+       if (!status)
+               be_cmd_get_fw_ver(adapter, adapter->fw_ver,
+                                 adapter->fw_on_flash);
+
 fw_exit:
        release_firmware(fw);
        return status;
@@ -4101,6 +4071,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
 
 static int lancer_recover_func(struct be_adapter *adapter)
 {
+       struct device *dev = &adapter->pdev->dev;
        int status;
 
        status = lancer_test_and_set_rdy_state(adapter);
@@ -4112,8 +4083,7 @@ static int lancer_recover_func(struct be_adapter *adapter)
 
        be_clear(adapter);
 
-       adapter->hw_error = false;
-       adapter->fw_timeout = false;
+       be_clear_all_error(adapter);
 
        status = be_setup(adapter);
        if (status)
@@ -4125,13 +4095,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
                        goto err;
        }
 
-       dev_err(&adapter->pdev->dev,
-               "Adapter SLIPORT recovery succeeded\n");
+       dev_err(dev, "Error recovery successful\n");
        return 0;
 err:
-       if (adapter->eeh_error)
-               dev_err(&adapter->pdev->dev,
-                       "Adapter SLIPORT recovery failed\n");
+       if (status == -EAGAIN)
+               dev_err(dev, "Waiting for resource provisioning\n");
+       else
+               dev_err(dev, "Error recovery failed\n");
 
        return status;
 }
@@ -4140,28 +4110,27 @@ static void be_func_recovery_task(struct work_struct *work)
 {
        struct be_adapter *adapter =
                container_of(work, struct be_adapter,  func_recovery_work.work);
-       int status;
+       int status = 0;
 
        be_detect_error(adapter);
 
        if (adapter->hw_error && lancer_chip(adapter)) {
 
-               if (adapter->eeh_error)
-                       goto out;
-
                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();
 
                status = lancer_recover_func(adapter);
-
                if (!status)
                        netif_device_attach(adapter->netdev);
        }
 
-out:
-       schedule_delayed_work(&adapter->func_recovery_work,
-                             msecs_to_jiffies(1000));
+       /* In Lancer, for all errors other than provisioning error (-EAGAIN),
+        * no need to attempt further recovery.
+        */
+       if (!status || status == -EAGAIN)
+               schedule_delayed_work(&adapter->func_recovery_work,
+                                     msecs_to_jiffies(1000));
 }
 
 static void be_worker(struct work_struct *work)
@@ -4207,9 +4176,10 @@ reschedule:
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
+/* If any VFs are already enabled don't FLR the PF */
 static bool be_reset_required(struct be_adapter *adapter)
 {
-       return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
+       return pci_num_vf(adapter->pdev) ? false : true;
 }
 
 static char *mc_name(struct be_adapter *adapter)
@@ -4266,6 +4236,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               if (!status)
+                       status = dma_set_coherent_mask(&pdev->dev,
+                                                      DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
@@ -4444,20 +4417,19 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
 
        dev_err(&adapter->pdev->dev, "EEH error detected\n");
 
-       adapter->eeh_error = true;
-
-       cancel_delayed_work_sync(&adapter->func_recovery_work);
+       if (!adapter->eeh_error) {
+               adapter->eeh_error = true;
 
-       rtnl_lock();
-       netif_device_detach(netdev);
-       rtnl_unlock();
+               cancel_delayed_work_sync(&adapter->func_recovery_work);
 
-       if (netif_running(netdev)) {
                rtnl_lock();
-               be_close(netdev);
+               netif_device_detach(netdev);
+               if (netif_running(netdev))
+                       be_close(netdev);
                rtnl_unlock();
+
+               be_clear(adapter);
        }
-       be_clear(adapter);
 
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
@@ -4482,7 +4454,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
        int status;
 
        dev_info(&adapter->pdev->dev, "EEH reset\n");
-       be_clear_all_error(adapter);
 
        status = pci_enable_device(pdev);
        if (status)
@@ -4500,6 +4471,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
 
        pci_cleanup_aer_uncorrect_error_status(pdev);
+       be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
index 5722bc61fa582d0796a4ce37b2e8912c4840cc59..cf579fb39bc5cf575f217902b725052d34456d0e 100644 (file)
@@ -1147,8 +1147,6 @@ static int ethoc_remove(struct platform_device *pdev)
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct ethoc *priv = netdev_priv(netdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (netdev) {
                netif_napi_del(&priv->napi);
                phy_disconnect(priv->phy);
index b8974b9e3b479cd90b90bff7235245629ab59635..5918c68916946acc9cf14202b0e28d823a7515c3 100644 (file)
@@ -21,7 +21,6 @@ if NET_VENDOR_FARADAY
 config FTMAC100
        tristate "Faraday FTMAC100 10/100 Ethernet support"
        depends on ARM
-       select NET_CORE
        select MII
        ---help---
          This driver supports the FTMAC100 10/100 Ethernet controller
index 21b85fb7d05f68da25f7980b937e61b1c4d9de56..934e1ae279f01ac85ce0ddb221470a9aeebab7d5 100644 (file)
@@ -1311,7 +1311,6 @@ err_ioremap:
        release_resource(priv->res);
 err_req_mem:
        netif_napi_del(&priv->napi);
-       platform_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 err_alloc_etherdev:
        return err;
@@ -1335,7 +1334,6 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
        release_resource(priv->res);
 
        netif_napi_del(&priv->napi);
-       platform_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        return 0;
 }
index a6eda8d83138bd791769ba9e45bccf5c1f88c00e..4658f4cc196933226f0488ab15d0a956e40354e4 100644 (file)
@@ -1149,7 +1149,6 @@ err_ioremap:
        release_resource(priv->res);
 err_req_mem:
        netif_napi_del(&priv->napi);
-       platform_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 err_alloc_etherdev:
        return err;
@@ -1169,7 +1168,6 @@ static int __exit ftmac100_remove(struct platform_device *pdev)
        release_resource(priv->res);
 
        netif_napi_del(&priv->napi);
-       platform_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        return 0;
 }
index 9ce5b7185fda196f521d439a143b3021b56ff5c3..8362a0399afbb2f24fb8f34a64612e28ce1d7322 100644 (file)
 #define BM_MIIGSK_CFGR_RMII            0x01
 #define BM_MIIGSK_CFGR_FRCONT_10M      0x40
 
+#define RMON_T_DROP            0x200 /* Count of frames not cntd correctly */
+#define RMON_T_PACKETS         0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT          0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT          0x20C /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN       0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE       0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE                0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG            0x21C /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB             0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL             0x224 /* RMON TX collision count */
+#define RMON_T_P64             0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127                0x22C /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255       0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511       0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023      0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047     0x23C /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048       0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS          0x244 /* RMON TX octets */
+#define IEEE_T_DROP            0x248 /* Count of frames not counted crtly */
+#define IEEE_T_FRAME_OK                0x24C /* Frames tx'd OK */
+#define IEEE_T_1COL            0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL            0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF             0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL            0x25C /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL           0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_MACERR          0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR           0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE             0x26C /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC           0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK       0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS         0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT          0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT          0x28C /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN       0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE       0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE                0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG            0x29C /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB             0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O         0x2A4 /* Reserved */
+#define RMON_R_P64             0x2A8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127                0x2AC /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255       0x2B0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511       0x2B4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023      0x2B8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047     0x2BC /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048       0x2C0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS          0x2C4 /* RMON RX octets */
+#define IEEE_R_DROP            0x2C8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK                0x2CC /* Frames rx'd OK */
+#define IEEE_R_CRC             0x2D0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN           0x2D4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR          0x2D8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC           0x2DC /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK       0x2E0 /* Octet cnt for frames rx'd w/o err */
+
 #else
 
 #define FEC_ECNTRL             0x000 /* Ethernet control reg */
@@ -272,9 +327,10 @@ struct fec_enet_private {
        int hwts_tx_en;
        struct timer_list time_keep;
        struct fec_enet_delayed_work delay_work;
+       struct regulator *reg_phy;
 };
 
-void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void fec_ptp_init(struct platform_device *pdev);
 void fec_ptp_start_cyclecounter(struct net_device *ndev);
 int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
 
index aff0310a778bf7afb545c4b53f995770f7f03ec2..ed6180e7db2fa028edff83621ffd8b9d3e2faf9c 100644 (file)
@@ -53,7 +53,6 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_net.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/regulator/consumer.h>
 
 #include <asm/cacheflush.h>
@@ -87,6 +86,8 @@
 #define FEC_QUIRK_HAS_GBIT             (1 << 3)
 /* Controller has extend desc buffer */
 #define FEC_QUIRK_HAS_BUFDESC_EX       (1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM             (1 << 5)
 
 static struct platform_device_id fec_devtype[] = {
        {
@@ -105,9 +106,9 @@ static struct platform_device_id fec_devtype[] = {
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-                               FEC_QUIRK_HAS_BUFDESC_EX,
+                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM,
        }, {
-               .name = "mvf-fec",
+               .name = "mvf600-fec",
                .driver_data = FEC_QUIRK_ENET_MAC,
        }, {
                /* sentinel */
@@ -120,7 +121,7 @@ enum imx_fec_type {
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
-       MVF_FEC,
+       MVF600_FEC,
 };
 
 static const struct of_device_id fec_dt_ids[] = {
@@ -128,7 +129,7 @@ static const struct of_device_id fec_dt_ids[] = {
        { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
        { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
        { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
-       { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], },
+       { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -241,7 +242,7 @@ static void *swap_buffer(void *bufaddr, int len)
        int i;
        unsigned int *buf = bufaddr;
 
-       for (i = 0; i < (len + 3) / 4; i++, buf++)
+       for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
                *buf = cpu_to_be32(*buf);
 
        return bufaddr;
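
[Editor's note] The swap_buffer() change is purely cosmetic: the kernel's DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so DIV_ROUND_UP(len, 4) walks exactly the same number of 32-bit words as the old (len + 3) / 4. A two-line check of the equivalence:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	for (unsigned int len = 0; len < 64; len++)
		assert(DIV_ROUND_UP(len, 4) == (len + 3) / 4);
	return 0;
}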
@@ -449,7 +450,7 @@ fec_restart(struct net_device *ndev, int duplex)
                netif_device_detach(ndev);
                napi_disable(&fep->napi);
                netif_stop_queue(ndev);
-               netif_tx_lock(ndev);
+               netif_tx_lock_bh(ndev);
        }
 
        /* Whack a reset.  We should wait for this. */
@@ -603,6 +604,14 @@ fec_restart(struct net_device *ndev, int duplex)
        if (fep->bufdesc_ex)
                ecntl |= (1 << 4);
 
+#ifndef CONFIG_M5272
+       /* Disable, clear, and enable the MIB */
+       writel(1 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
+       for (i = RMON_T_DROP; i < IEEE_R_OCTETS_OK; i++)
+               writel(0, fep->hwp + i);
+       writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
+#endif
+
        /* And last, enable the transmit and receive processing */
        writel(ecntl, fep->hwp + FEC_ECNTRL);
        writel(0, fep->hwp + FEC_R_DES_ACTIVE);
@@ -614,10 +623,10 @@ fec_restart(struct net_device *ndev, int duplex)
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
        if (netif_running(ndev)) {
-               netif_device_attach(ndev);
-               napi_enable(&fep->napi);
+               netif_tx_unlock_bh(ndev);
                netif_wake_queue(ndev);
-               netif_tx_unlock(ndev);
+               napi_enable(&fep->napi);
+               netif_device_attach(ndev);
        }
 }
 
@@ -1036,6 +1045,18 @@ static void fec_get_mac(struct net_device *ndev)
                iap = &tmpaddr[0];
        }
 
+       /*
+        * 5) random mac address
+        */
+       if (!is_valid_ether_addr(iap)) {
+               /* Report it and use a random ethernet address instead */
+               netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+               eth_hw_addr_random(ndev);
+               netdev_info(ndev, "Using random MAC address: %pM\n",
+                           ndev->dev_addr);
+               return;
+       }
+
        memcpy(ndev->dev_addr, iap, ETH_ALEN);
 
        /* Adjust MAC if using macaddr */
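
[Editor's note] With the hunk above, fec_get_mac() falls back to eth_hw_addr_random() when neither module parameter, device tree, fuses/registers nor platform data yields a valid address, reporting the invalid address and the randomly chosen one. A userspace sketch of how such an address is typically formed, assuming the usual convention of clearing the multicast bit and setting the locally administered bit; `random_local_mac` is an invented helper, and rand() stands in for the kernel's random source:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void random_local_mac(unsigned char addr[6])
{
	for (int i = 0; i < 6; i++)
		addr[i] = rand() & 0xff;
	addr[0] &= 0xfe;   /* clear multicast bit */
	addr[0] |= 0x02;   /* set locally administered bit */
}

int main(void)
{
	unsigned char mac[6];

	srand((unsigned int)time(NULL));
	random_local_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}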
@@ -1422,6 +1443,118 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
        return 0;
 }
 
+#ifndef CONFIG_M5272
+static const struct fec_stat {
+       char name[ETH_GSTRING_LEN];
+       u16 offset;
+} fec_stats[] = {
+       /* RMON TX */
+       { "tx_dropped", RMON_T_DROP },
+       { "tx_packets", RMON_T_PACKETS },
+       { "tx_broadcast", RMON_T_BC_PKT },
+       { "tx_multicast", RMON_T_MC_PKT },
+       { "tx_crc_errors", RMON_T_CRC_ALIGN },
+       { "tx_undersize", RMON_T_UNDERSIZE },
+       { "tx_oversize", RMON_T_OVERSIZE },
+       { "tx_fragment", RMON_T_FRAG },
+       { "tx_jabber", RMON_T_JAB },
+       { "tx_collision", RMON_T_COL },
+       { "tx_64byte", RMON_T_P64 },
+       { "tx_65to127byte", RMON_T_P65TO127 },
+       { "tx_128to255byte", RMON_T_P128TO255 },
+       { "tx_256to511byte", RMON_T_P256TO511 },
+       { "tx_512to1023byte", RMON_T_P512TO1023 },
+       { "tx_1024to2047byte", RMON_T_P1024TO2047 },
+       { "tx_GTE2048byte", RMON_T_P_GTE2048 },
+       { "tx_octets", RMON_T_OCTETS },
+
+       /* IEEE TX */
+       { "IEEE_tx_drop", IEEE_T_DROP },
+       { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
+       { "IEEE_tx_1col", IEEE_T_1COL },
+       { "IEEE_tx_mcol", IEEE_T_MCOL },
+       { "IEEE_tx_def", IEEE_T_DEF },
+       { "IEEE_tx_lcol", IEEE_T_LCOL },
+       { "IEEE_tx_excol", IEEE_T_EXCOL },
+       { "IEEE_tx_macerr", IEEE_T_MACERR },
+       { "IEEE_tx_cserr", IEEE_T_CSERR },
+       { "IEEE_tx_sqe", IEEE_T_SQE },
+       { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
+       { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
+
+       /* RMON RX */
+       { "rx_packets", RMON_R_PACKETS },
+       { "rx_broadcast", RMON_R_BC_PKT },
+       { "rx_multicast", RMON_R_MC_PKT },
+       { "rx_crc_errors", RMON_R_CRC_ALIGN },
+       { "rx_undersize", RMON_R_UNDERSIZE },
+       { "rx_oversize", RMON_R_OVERSIZE },
+       { "rx_fragment", RMON_R_FRAG },
+       { "rx_jabber", RMON_R_JAB },
+       { "rx_64byte", RMON_R_P64 },
+       { "rx_65to127byte", RMON_R_P65TO127 },
+       { "rx_128to255byte", RMON_R_P128TO255 },
+       { "rx_256to511byte", RMON_R_P256TO511 },
+       { "rx_512to1023byte", RMON_R_P512TO1023 },
+       { "rx_1024to2047byte", RMON_R_P1024TO2047 },
+       { "rx_GTE2048byte", RMON_R_P_GTE2048 },
+       { "rx_octets", RMON_R_OCTETS },
+
+       /* IEEE RX */
+       { "IEEE_rx_drop", IEEE_R_DROP },
+       { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
+       { "IEEE_rx_crc", IEEE_R_CRC },
+       { "IEEE_rx_align", IEEE_R_ALIGN },
+       { "IEEE_rx_macerr", IEEE_R_MACERR },
+       { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
+       { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+};
+
+static void fec_enet_get_ethtool_stats(struct net_device *dev,
+       struct ethtool_stats *stats, u64 *data)
+{
+       struct fec_enet_private *fep = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+               data[i] = readl(fep->hwp + fec_stats[i].offset);
+}
+
+static void fec_enet_get_strings(struct net_device *netdev,
+       u32 stringset, u8 *data)
+{
+       int i;
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                               fec_stats[i].name, ETH_GSTRING_LEN);
+               break;
+       }
+}
+
+static int fec_enet_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(fec_stats);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+#endif
+
+static int fec_enet_nway_reset(struct net_device *dev)
+{
+       struct fec_enet_private *fep = netdev_priv(dev);
+       struct phy_device *phydev = fep->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return genphy_restart_aneg(phydev);
+}
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
        .get_pauseparam         = fec_enet_get_pauseparam,
        .set_pauseparam         = fec_enet_set_pauseparam,
@@ -1430,6 +1563,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .get_drvinfo            = fec_enet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = fec_enet_get_ts_info,
+       .nway_reset             = fec_enet_nway_reset,
+#ifndef CONFIG_M5272
+       .get_ethtool_stats      = fec_enet_get_ethtool_stats,
+       .get_strings            = fec_enet_get_strings,
+       .get_sset_count         = fec_enet_get_sset_count,
+#endif
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
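
[Editor's note] Together with the RMON/IEEE register table added to fec.h, the three hooks above export the MAC's hardware MIB counters through the standard ethtool statistics interface, so they appear in `ethtool -S <iface>` on parts other than M5272 (which lacks the MIB block). The driver's pattern, a name/offset table walked by get_ethtool_stats, can be mirrored in a standalone sketch; the offsets below are taken from the fec.h hunk, while the struct, table subset and simulated register window are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Name/offset descriptor table, mirroring the fec_stats[] idea. */
struct stat_desc {
	const char *name;
	unsigned int offset;   /* byte offset into the register window */
};

static const struct stat_desc stats[] = {
	{ "tx_packets", 0x204 },
	{ "tx_octets",  0x244 },
	{ "rx_packets", 0x284 },
	{ "rx_octets",  0x2C4 },
};

/* Simulated register window standing in for readl(fep->hwp + offset). */
static uint32_t regs[0x300 / 4];

static uint32_t reg_read(unsigned int offset)
{
	return regs[offset / 4];
}

int main(void)
{
	regs[0x204 / 4] = 42;   /* pretend 42 packets were transmitted */

	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		printf("%s: %u\n", stats[i].name, (unsigned)reg_read(stats[i].offset));
	return 0;
}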
@@ -1744,6 +1883,8 @@ static const struct net_device_ops fec_netdev_ops = {
 static int fec_enet_init(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
+       const struct platform_device_id *id_entry =
+                               platform_get_device_id(fep->pdev);
        struct bufdesc *cbd_base;
 
        /* Allocate memory for buffer descriptors. */
@@ -1775,12 +1916,14 @@ static int fec_enet_init(struct net_device *ndev)
        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-       /* enable hw accelerator */
-       ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-                       | NETIF_F_RXCSUM);
-       ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-                       | NETIF_F_RXCSUM);
-       fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+       if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+               /* enable hw accelerator */
+               ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+                               | NETIF_F_RXCSUM);
+               ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+                               | NETIF_F_RXCSUM);
+               fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+       }
 
        fec_restart(ndev, 0);
 
@@ -1835,8 +1978,6 @@ fec_probe(struct platform_device *pdev)
        struct resource *r;
        const struct of_device_id *of_id;
        static int dev_id;
-       struct pinctrl *pinctrl;
-       struct regulator *reg_phy;
 
        of_id = of_match_device(fec_dt_ids, &pdev->dev);
        if (of_id)
@@ -1861,17 +2002,17 @@ fec_probe(struct platform_device *pdev)
            (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
 
-       fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
+       fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(fep->hwp)) {
+               ret = PTR_ERR(fep->hwp);
+               goto failed_ioremap;
+       }
+
        fep->pdev = pdev;
        fep->dev_id = dev_id++;
 
        fep->bufdesc_ex = 0;
 
-       if (!fep->hwp) {
-               ret = -ENOMEM;
-               goto failed_ioremap;
-       }
-
        platform_set_drvdata(pdev, ndev);
 
        ret = of_get_phy_mode(pdev->dev.of_node);
@@ -1885,12 +2026,6 @@ fec_probe(struct platform_device *pdev)
                fep->phy_interface = ret;
        }
 
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(pinctrl);
-               goto failed_pin;
-       }
-
        fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(fep->clk_ipg)) {
                ret = PTR_ERR(fep->clk_ipg);
@@ -1921,20 +2056,22 @@ fec_probe(struct platform_device *pdev)
        clk_prepare_enable(fep->clk_enet_out);
        clk_prepare_enable(fep->clk_ptp);
 
-       reg_phy = devm_regulator_get(&pdev->dev, "phy");
-       if (!IS_ERR(reg_phy)) {
-               ret = regulator_enable(reg_phy);
+       fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+       if (!IS_ERR(fep->reg_phy)) {
+               ret = regulator_enable(fep->reg_phy);
                if (ret) {
                        dev_err(&pdev->dev,
                                "Failed to enable phy regulator: %d\n", ret);
                        goto failed_regulator;
                }
+       } else {
+               fep->reg_phy = NULL;
        }
 
        fec_reset_phy(pdev);
 
        if (fep->bufdesc_ex)
-               fec_ptp_init(ndev, pdev);
+               fec_ptp_init(pdev);
 
        ret = fec_enet_init(ndev);
        if (ret)
@@ -1978,19 +2115,20 @@ fec_probe(struct platform_device *pdev)
 failed_register:
        fec_enet_mii_remove(fep);
 failed_mii_init:
-failed_init:
+failed_irq:
        for (i = 0; i < FEC_IRQ_NUM; i++) {
                irq = platform_get_irq(pdev, i);
                if (irq > 0)
                        free_irq(irq, ndev);
        }
-failed_irq:
+failed_init:
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
 failed_regulator:
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
        clk_disable_unprepare(fep->clk_enet_out);
        clk_disable_unprepare(fep->clk_ptp);
-failed_pin:
 failed_clk:
 failed_ioremap:
        free_netdev(ndev);
@@ -2009,21 +2147,21 @@ fec_drv_remove(struct platform_device *pdev)
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        del_timer_sync(&fep->time_keep);
+       for (i = 0; i < FEC_IRQ_NUM; i++) {
+               int irq = platform_get_irq(pdev, i);
+               if (irq > 0)
+                       free_irq(irq, ndev);
+       }
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
        clk_disable_unprepare(fep->clk_ptp);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
        clk_disable_unprepare(fep->clk_enet_out);
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
-       for (i = 0; i < FEC_IRQ_NUM; i++) {
-               int irq = platform_get_irq(pdev, i);
-               if (irq > 0)
-                       free_irq(irq, ndev);
-       }
        free_netdev(ndev);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
@@ -2042,6 +2180,9 @@ fec_suspend(struct device *dev)
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
 
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
+
        return 0;
 }
 
@@ -2050,6 +2191,13 @@ fec_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
+
+       if (fep->reg_phy) {
+               ret = regulator_enable(fep->reg_phy);
+               if (ret)
+                       return ret;
+       }
 
        clk_prepare_enable(fep->clk_enet_out);
        clk_prepare_enable(fep->clk_ahb);
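
The fec_main.c hunks above tighten the probe error handling: the register window is now mapped with devm_ioremap_resource(), which validates the resource and returns an ERR_PTR() value on failure, and the PHY regulator is kept in fep->reg_phy so it can be disabled on the error path, in remove and in suspend, and re-enabled in resume. A minimal sketch of the devm_ioremap_resource() idiom adopted here (foo_probe is a hypothetical driver, not part of this patch):

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* checks 'res', requests and maps it; errors come back as ERR_PTR() */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... remaining probe steps ... */
	return 0;
}
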
index 9bc15e2365bbc924631da31872b10ae94e0062c7..9947765e90c547e97eee26edbc55aa06fd436329 100644 (file)
@@ -981,7 +981,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
                goto err_node;
 
        /* We're done ! */
-       dev_set_drvdata(&op->dev, ndev);
+       platform_set_drvdata(op, ndev);
        netdev_info(ndev, "%s MAC %pM\n",
                    op->dev.of_node->full_name, ndev->dev_addr);
 
@@ -1010,7 +1010,7 @@ mpc52xx_fec_remove(struct platform_device *op)
        struct net_device *ndev;
        struct mpc52xx_fec_priv *priv;
 
-       ndev = dev_get_drvdata(&op->dev);
+       ndev = platform_get_drvdata(op);
        priv = netdev_priv(ndev);
 
        unregister_netdev(ndev);
@@ -1030,14 +1030,13 @@ mpc52xx_fec_remove(struct platform_device *op)
 
        free_netdev(ndev);
 
-       dev_set_drvdata(&op->dev, NULL);
        return 0;
 }
 
 #ifdef CONFIG_PM
 static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
 {
-       struct net_device *dev = dev_get_drvdata(&op->dev);
+       struct net_device *dev = platform_get_drvdata(op);
 
        if (netif_running(dev))
                mpc52xx_fec_close(dev);
@@ -1047,7 +1046,7 @@ static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state
 
 static int mpc52xx_fec_of_resume(struct platform_device *op)
 {
-       struct net_device *dev = dev_get_drvdata(&op->dev);
+       struct net_device *dev = platform_get_drvdata(op);
 
        mpc52xx_fec_hw_init(dev);
        mpc52xx_fec_reset_stats(dev);
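
This hunk, and many of the ones that follow, swap dev_set_drvdata(&op->dev, x) / dev_get_drvdata(&op->dev) for the platform_set_drvdata() / platform_get_drvdata() wrappers and drop the explicit clearing of driver data on remove, which the driver core now takes care of when the device is unbound. The pattern, reduced to a sketch (bar_* names are illustrative only):

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/slab.h>

struct bar_priv {
	/* ... driver state ... */
};

static int bar_probe(struct platform_device *pdev)
{
	struct bar_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	platform_set_drvdata(pdev, priv);	/* same as dev_set_drvdata(&pdev->dev, priv) */
	return 0;
}

static int bar_remove(struct platform_device *pdev)
{
	struct bar_priv *priv = platform_get_drvdata(pdev);

	dev_info(&pdev->dev, "removing %p\n", priv);
	/* tear down 'priv'; no need to reset drvdata to NULL here */
	return 0;
}
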
index 25fc960cbf0e9bc2e2f9bdd9e01e6f91eb2ebffd..5007e4f9fff91705d93e875ad2c15108e915ebb3 100644 (file)
@@ -347,8 +347,9 @@ static void fec_time_keep(unsigned long _data)
  * cyclecounter init routine and exits.
  */
 
-void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+void fec_ptp_init(struct platform_device *pdev)
 {
+       struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
        fep->ptp_caps.owner = THIS_MODULE;
index 268414d9f2cbfc84272ed6184ea34ab211cdc537..be92229f2c2a52f447c96a086e4683cc0c341e68 100644 (file)
@@ -1,7 +1,6 @@
 config FS_ENET
        tristate "Freescale Ethernet Driver"
        depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
-       select NET_CORE
        select MII
        select PHYLIB
 
index edc120094c340bbe61dbd33f4a3cbbf9b9a3e8d8..8de53a14a6f48895bc107c6f8ec40910d35ced7b 100644 (file)
@@ -1048,7 +1048,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
        }
 
        SET_NETDEV_DEV(ndev, &ofdev->dev);
-       dev_set_drvdata(&ofdev->dev, ndev);
+       platform_set_drvdata(ofdev, ndev);
 
        fep = netdev_priv(ndev);
        fep->dev = &ofdev->dev;
@@ -1106,7 +1106,6 @@ out_cleanup_data:
        fep->ops->cleanup_data(ndev);
 out_free_dev:
        free_netdev(ndev);
-       dev_set_drvdata(&ofdev->dev, NULL);
 out_put:
        of_node_put(fpi->phy_node);
 out_free_fpi:
@@ -1116,7 +1115,7 @@ out_free_fpi:
 
 static int fs_enet_remove(struct platform_device *ofdev)
 {
-       struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *ndev = platform_get_drvdata(ofdev);
        struct fs_enet_private *fep = netdev_priv(ndev);
 
        unregister_netdev(ndev);
index 2bafbd37c247ab52802e64b531ac2e7ccc5e148a..844ecfa84d175f9b2dbcec1e5c724ac22b4ac395 100644 (file)
@@ -179,7 +179,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
        }
 
        new_bus->parent = &ofdev->dev;
-       dev_set_drvdata(&ofdev->dev, new_bus);
+       platform_set_drvdata(ofdev, new_bus);
 
        ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
        if (ret)
@@ -188,7 +188,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
        return 0;
 
 out_free_irqs:
-       dev_set_drvdata(&ofdev->dev, NULL);
        kfree(new_bus->irq);
 out_unmap_regs:
        iounmap(bitbang->dir);
@@ -202,11 +201,10 @@ out:
 
 static int fs_enet_mdio_remove(struct platform_device *ofdev)
 {
-       struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
+       struct mii_bus *bus = platform_get_drvdata(ofdev);
        struct bb_info *bitbang = bus->priv;
 
        mdiobus_unregister(bus);
-       dev_set_drvdata(&ofdev->dev, NULL);
        kfree(bus->irq);
        free_mdio_bitbang(bus);
        iounmap(bitbang->dir);
index 18e8ef203736bf2154967599a102fd454e7cc05a..2f1c46a12f0516bbcd122d1ca70a7830ea2223dd 100644 (file)
@@ -180,7 +180,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
        }
 
        new_bus->parent = &ofdev->dev;
-       dev_set_drvdata(&ofdev->dev, new_bus);
+       platform_set_drvdata(ofdev, new_bus);
 
        ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
        if (ret)
@@ -189,7 +189,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
        return 0;
 
 out_free_irqs:
-       dev_set_drvdata(&ofdev->dev, NULL);
        kfree(new_bus->irq);
 out_unmap_regs:
        iounmap(fec->fecp);
@@ -204,11 +203,10 @@ out:
 
 static int fs_enet_mdio_remove(struct platform_device *ofdev)
 {
-       struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
+       struct mii_bus *bus = platform_get_drvdata(ofdev);
        struct fec_info *fec = bus->priv;
 
        mdiobus_unregister(bus);
-       dev_set_drvdata(&ofdev->dev, NULL);
        kfree(bus->irq);
        iounmap(fec->fecp);
        kfree(fec);
index 2375a01715a0e93fa8f78c89dfcc90a88325585e..8d2db7b808b7cb4321dbc6edeb12ebab94b7b0f0 100644 (file)
@@ -128,6 +128,7 @@ static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 static void gfar_configure_serdes(struct net_device *dev);
 static int gfar_poll(struct napi_struct *napi, int budget);
+static int gfar_poll_sq(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
@@ -1000,7 +1001,7 @@ static int gfar_probe(struct platform_device *ofdev)
        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);
 
-       dev_set_drvdata(&ofdev->dev, priv);
+       platform_set_drvdata(ofdev, priv);
        regs = priv->gfargrp[0].regs;
 
        gfar_detect_errata(priv);
@@ -1038,9 +1039,13 @@ static int gfar_probe(struct platform_device *ofdev)
        dev->ethtool_ops = &gfar_ethtool_ops;
 
        /* Register for napi ...We are registering NAPI for each grp */
-       for (i = 0; i < priv->num_grps; i++)
-               netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+       if (priv->mode == SQ_SG_MODE)
+               netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
                               GFAR_DEV_WEIGHT);
+       else
+               for (i = 0; i < priv->num_grps; i++)
+                       netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+                                      GFAR_DEV_WEIGHT);
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -1240,15 +1245,13 @@ register_fail:
 
 static int gfar_remove(struct platform_device *ofdev)
 {
-       struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
+       struct gfar_private *priv = platform_get_drvdata(ofdev);
 
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);
 
-       dev_set_drvdata(&ofdev->dev, NULL);
-
        unregister_netdev(priv->ndev);
        unmap_group_regs(priv);
        free_gfar_dev(priv);
@@ -2825,6 +2828,48 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
        return howmany;
 }
 
+static int gfar_poll_sq(struct napi_struct *napi, int budget)
+{
+       struct gfar_priv_grp *gfargrp =
+               container_of(napi, struct gfar_priv_grp, napi);
+       struct gfar __iomem *regs = gfargrp->regs;
+       struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
+       struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+       int work_done = 0;
+
+       /* Clear IEVENT, so interrupts aren't called again
+        * because of the packets that have already arrived
+        */
+       gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+
+       /* run Tx cleanup to completion */
+       if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+               gfar_clean_tx_ring(tx_queue);
+
+       work_done = gfar_clean_rx_ring(rx_queue, budget);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               /* Clear the halt bit in RSTAT */
+               gfar_write(&regs->rstat, gfargrp->rstat);
+
+               gfar_write(&regs->imask, IMASK_DEFAULT);
+
+               /* If we are coalescing interrupts, update the timer
+                * Otherwise, clear it
+                */
+               gfar_write(&regs->txic, 0);
+               if (likely(tx_queue->txcoalescing))
+                       gfar_write(&regs->txic, tx_queue->txic);
+
+               gfar_write(&regs->rxic, 0);
+               if (unlikely(rx_queue->rxcoalescing))
+                       gfar_write(&regs->rxic, rx_queue->rxic);
+       }
+
+       return work_done;
+}
+
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
        struct gfar_priv_grp *gfargrp =
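
gfar_poll_sq() added above is a slimmer NAPI handler that gianfar now registers when running with a single RX/TX queue group (SQ_SG_MODE), avoiding the per-group and per-queue iteration done in gfar_poll(). The general NAPI polling shape it follows looks roughly like this (foo_* names and helpers are placeholders, not gianfar code):

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* ... hardware queues, registers ... */
};

/* stand-ins for the driver's real RX cleanup and interrupt control */
static int foo_clean_rx(struct foo_priv *priv, int budget) { return 0; }
static void foo_enable_irqs(struct foo_priv *priv) { }

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_clean_rx(priv, budget);	/* handle at most 'budget' packets */

	if (work_done < budget) {
		napi_complete(napi);		/* done for now, leave polled mode */
		foo_enable_irqs(priv);		/* let the device interrupt again */
	}
	return work_done;
}
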
index 576e4b858fce09d7bd00f1a7492daf598bbe1741..098f133908ae016058f326225e2ed58b9709d592 100644 (file)
@@ -519,11 +519,12 @@ static int gianfar_ptp_probe(struct platform_device *dev)
        }
        gfar_phc_index = ptp_clock_index(etsects->clock);
 
-       dev_set_drvdata(&dev->dev, etsects);
+       platform_set_drvdata(dev, etsects);
 
        return 0;
 
 no_clock:
+       iounmap(etsects->regs);
 no_ioremap:
        release_resource(etsects->rsrc);
 no_resource:
@@ -536,7 +537,7 @@ no_memory:
 
 static int gianfar_ptp_remove(struct platform_device *dev)
 {
-       struct etsects *etsects = dev_get_drvdata(&dev->dev);
+       struct etsects *etsects = platform_get_drvdata(dev);
 
        gfar_write(&etsects->regs->tmr_temask, 0);
        gfar_write(&etsects->regs->tmr_ctrl,   0);
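
The gianfar_ptp hunk adds the iounmap() that was missing from the no_clock error path, keeping to the usual probe() convention that each goto label undoes, in reverse order, exactly the steps that succeeded before the failure. Sketched abstractly (helpers and labels are illustrative, not the driver's real names):

#include <linux/errno.h>

static int grab_resource(void)  { return 0; }		/* stand-ins for the real steps */
static int map_registers(void)  { return 0; }
static int register_clock(void) { return -ENODEV; }	/* pretend the last step fails */

static void unmap_registers(void) { }
static void put_resource(void)    { }

static int baz_probe(void)
{
	int err;

	err = grab_resource();
	if (err)
		goto no_resource;
	err = map_registers();
	if (err)
		goto no_ioremap;
	err = register_clock();
	if (err)
		goto no_clock;
	return 0;

no_clock:
	unmap_registers();	/* undo the mapping done before the failing step */
no_ioremap:
	put_resource();		/* undo the earlier resource grab */
no_resource:
	return err;
}
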
index e04c59818f60ff2f9616599d34e301e97a2f65f5..3c43dac894ecddde910e2ce8e784c455944af915 100644 (file)
@@ -3564,7 +3564,7 @@ static void ucc_geth_timeout(struct net_device *dev)
 
 static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
 {
-       struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *ndev = platform_get_drvdata(ofdev);
        struct ucc_geth_private *ugeth = netdev_priv(ndev);
 
        if (!netif_running(ndev))
@@ -3592,7 +3592,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
 
 static int ucc_geth_resume(struct platform_device *ofdev)
 {
-       struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
+       struct net_device *ndev = platform_get_drvdata(ofdev);
        struct ucc_geth_private *ugeth = netdev_priv(ndev);
        int err;
 
index 418068b941b1cadea61271ba5cc52aff01aefc10..c1b6e7e31aac4f258dd21049c4e29b7f006e933a 100644 (file)
@@ -227,7 +227,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
                goto err_registration;
        }
 
-       dev_set_drvdata(&pdev->dev, bus);
+       platform_set_drvdata(pdev, bus);
 
        return 0;
 
@@ -242,7 +242,7 @@ err_ioremap:
 
 static int xgmac_mdio_remove(struct platform_device *pdev)
 {
-       struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+       struct mii_bus *bus = platform_get_drvdata(pdev);
 
        mdiobus_unregister(bus);
        iounmap(bus->priv);
index 6529d31595a7e595f4ebd9e55929d017253c2c17..563a1ac71dbc8aa332657a9625120e90253b9aa5 100644 (file)
@@ -5,8 +5,7 @@
 config NET_VENDOR_IBM
        bool "IBM devices"
        default y
-       depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \
-                  (IBMEBUS && SPARSEMEM)
+       depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM)
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
index 90ea0b1673ca45300f902f0f038f0a4a7061b745..0605e76c7eddfbd00b218faa08ed6aeb9e79ac51 100644 (file)
@@ -3289,7 +3289,7 @@ static int ehea_probe_adapter(struct platform_device *dev,
 
        adapter->pd = EHEA_PD_ID;
 
-       dev_set_drvdata(&dev->dev, adapter);
+       platform_set_drvdata(dev, adapter);
 
 
        /* initialize adapter and ports */
@@ -3360,7 +3360,7 @@ out:
 
 static int ehea_remove(struct platform_device *dev)
 {
-       struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
+       struct ehea_adapter *adapter = platform_get_drvdata(dev);
        int i;
 
        for (i = 0; i < EHEA_MAX_PORTS; i++)
index 4989481c19f01b6dad9b560b535b6127da416eee..d300a0c0eafc0521a4e86688abd7e54afd0ce58e 100644 (file)
@@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev)
        }
 
 #ifdef CONFIG_PPC_DCR_NATIVE
-       /* Enable internal clock source */
-       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-               dcri_clrset(SDR0, SDR0_ETH_CFG,
-                           0, SDR0_ETH_CFG_ECS << dev->cell_index);
+       /*
+        * PPC460EX/GT Embedded Processor Advanced User's Manual
+        * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
+        * Note: The PHY must provide a TX Clk in order to perform a soft reset
+        * of the EMAC. If none is present, select the internal clock
+        * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
+        * After a soft reset, select the external clock.
+        */
+       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+               if (dev->phy_address == 0xffffffff &&
+                   dev->phy_map == 0xffffffff) {
+                       /* No PHY: select internal loop clock before reset */
+                       dcri_clrset(SDR0, SDR0_ETH_CFG,
+                                   0, SDR0_ETH_CFG_ECS << dev->cell_index);
+               } else {
+                       /* PHY present: select external clock before reset */
+                       dcri_clrset(SDR0, SDR0_ETH_CFG,
+                                   SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+               }
+       }
 #endif
 
        out_be32(&p->mr0, EMAC_MR0_SRST);
@@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev)
                --n;
 
 #ifdef CONFIG_PPC_DCR_NATIVE
-        /* Enable external clock source */
-       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-               dcri_clrset(SDR0, SDR0_ETH_CFG,
-                           SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+               if (dev->phy_address == 0xffffffff &&
+                   dev->phy_map == 0xffffffff) {
+                       /* No PHY: restore external clock source after reset */
+                       dcri_clrset(SDR0, SDR0_ETH_CFG,
+                                   SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+               }
+       }
 #endif
 
        if (n) {
index 610ed223d1dbd0e8c8136cdb5bb7bd46a433c8c2..856ea66c9223536bfd33cd46813d49b260bb49b0 100644 (file)
@@ -696,7 +696,7 @@ static int mal_probe(struct platform_device *ofdev)
 
        /* Advertise this instance to the rest of the world */
        wmb();
-       dev_set_drvdata(&ofdev->dev, mal);
+       platform_set_drvdata(ofdev, mal);
 
        mal_dbg_register(mal);
 
@@ -722,7 +722,7 @@ static int mal_probe(struct platform_device *ofdev)
 
 static int mal_remove(struct platform_device *ofdev)
 {
-       struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
+       struct mal_instance *mal = platform_get_drvdata(ofdev);
 
        MAL_DBG(mal, "remove" NL);
 
@@ -735,8 +735,6 @@ static int mal_remove(struct platform_device *ofdev)
                       "mal%d: commac list is not empty on remove!\n",
                       mal->index);
 
-       dev_set_drvdata(&ofdev->dev, NULL);
-
        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
index 39251765b55d444c8dbc0ab0e380e89546d4a964..c47e23d6eeaa0b57752d4ca047567178e1d37026 100644 (file)
@@ -95,7 +95,7 @@ static inline u32 rgmii_mode_mask(int mode, int input)
 
 int rgmii_attach(struct platform_device *ofdev, int input, int mode)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
        struct rgmii_regs __iomem *p = dev->base;
 
        RGMII_DBG(dev, "attach(%d)" NL, input);
@@ -124,7 +124,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
 
 void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
        struct rgmii_regs __iomem *p = dev->base;
        u32 ssr;
 
@@ -146,7 +146,7 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
 
 void rgmii_get_mdio(struct platform_device *ofdev, int input)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
        struct rgmii_regs __iomem *p = dev->base;
        u32 fer;
 
@@ -167,7 +167,7 @@ void rgmii_get_mdio(struct platform_device *ofdev, int input)
 
 void rgmii_put_mdio(struct platform_device *ofdev, int input)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
        struct rgmii_regs __iomem *p = dev->base;
        u32 fer;
 
@@ -188,7 +188,7 @@ void rgmii_put_mdio(struct platform_device *ofdev, int input)
 
 void rgmii_detach(struct platform_device *ofdev, int input)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
        struct rgmii_regs __iomem *p;
 
        BUG_ON(!dev || dev->users == 0);
@@ -214,7 +214,7 @@ int rgmii_get_regs_len(struct platform_device *ofdev)
 
 void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
 
@@ -279,7 +279,7 @@ static int rgmii_probe(struct platform_device *ofdev)
               (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
 
        wmb();
-       dev_set_drvdata(&ofdev->dev, dev);
+       platform_set_drvdata(ofdev, dev);
 
        return 0;
 
@@ -291,9 +291,7 @@ static int rgmii_probe(struct platform_device *ofdev)
 
 static int rgmii_remove(struct platform_device *ofdev)
 {
-       struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
-
-       dev_set_drvdata(&ofdev->dev, NULL);
+       struct rgmii_instance *dev = platform_get_drvdata(ofdev);
 
        WARN_ON(dev->users != 0);
 
index 795f1393e2b6d006fd568e91d22bfd58dc7d08d4..c231a4a32c4dc90d341b10c67c194c1025e4c686 100644 (file)
@@ -25,7 +25,7 @@
 
 int tah_attach(struct platform_device *ofdev, int channel)
 {
-       struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct tah_instance *dev = platform_get_drvdata(ofdev);
 
        mutex_lock(&dev->lock);
        /* Reset has been done at probe() time... nothing else to do for now */
@@ -37,7 +37,7 @@ int tah_attach(struct platform_device *ofdev, int channel)
 
 void tah_detach(struct platform_device *ofdev, int channel)
 {
-       struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct tah_instance *dev = platform_get_drvdata(ofdev);
 
        mutex_lock(&dev->lock);
        --dev->users;
@@ -46,7 +46,7 @@ void tah_detach(struct platform_device *ofdev, int channel)
 
 void tah_reset(struct platform_device *ofdev)
 {
-       struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct tah_instance *dev = platform_get_drvdata(ofdev);
        struct tah_regs __iomem *p = dev->base;
        int n;
 
@@ -74,7 +74,7 @@ int tah_get_regs_len(struct platform_device *ofdev)
 
 void *tah_dump_regs(struct platform_device *ofdev, void *buf)
 {
-       struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct tah_instance *dev = platform_get_drvdata(ofdev);
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
 
@@ -118,7 +118,7 @@ static int tah_probe(struct platform_device *ofdev)
                goto err_free;
        }
 
-       dev_set_drvdata(&ofdev->dev, dev);
+       platform_set_drvdata(ofdev, dev);
 
        /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
        tah_reset(ofdev);
@@ -137,9 +137,7 @@ static int tah_probe(struct platform_device *ofdev)
 
 static int tah_remove(struct platform_device *ofdev)
 {
-       struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
-
-       dev_set_drvdata(&ofdev->dev, NULL);
+       struct tah_instance *dev = platform_get_drvdata(ofdev);
 
        WARN_ON(dev->users != 0);
 
index f91202f42125d3088c6fbb69ac9c87b3417dfb07..4cdf286f7ee314dc55acb2494a74ede4468170e8 100644 (file)
@@ -84,7 +84,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
 
 int zmii_attach(struct platform_device *ofdev, int input, int *mode)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
        struct zmii_regs __iomem *p = dev->base;
 
        ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode);
@@ -150,7 +150,7 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
 
 void zmii_get_mdio(struct platform_device *ofdev, int input)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
        u32 fer;
 
        ZMII_DBG2(dev, "get_mdio(%d)" NL, input);
@@ -163,7 +163,7 @@ void zmii_get_mdio(struct platform_device *ofdev, int input)
 
 void zmii_put_mdio(struct platform_device *ofdev, int input)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
 
        ZMII_DBG2(dev, "put_mdio(%d)" NL, input);
        mutex_unlock(&dev->lock);
@@ -172,7 +172,7 @@ void zmii_put_mdio(struct platform_device *ofdev, int input)
 
 void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
        u32 ssr;
 
        mutex_lock(&dev->lock);
@@ -193,7 +193,7 @@ void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
 
 void zmii_detach(struct platform_device *ofdev, int input)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
 
        BUG_ON(!dev || dev->users == 0);
 
@@ -218,7 +218,7 @@ int zmii_get_regs_len(struct platform_device *ofdev)
 
 void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
 
@@ -272,7 +272,7 @@ static int zmii_probe(struct platform_device *ofdev)
        printk(KERN_INFO
               "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
        wmb();
-       dev_set_drvdata(&ofdev->dev, dev);
+       platform_set_drvdata(ofdev, dev);
 
        return 0;
 
@@ -284,9 +284,7 @@ static int zmii_probe(struct platform_device *ofdev)
 
 static int zmii_remove(struct platform_device *ofdev)
 {
-       struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
-
-       dev_set_drvdata(&ofdev->dev, NULL);
+       struct zmii_instance *dev = platform_get_drvdata(ofdev);
 
        WARN_ON(dev->users != 0);
 
index 5119ef18953b5d98e0c1c612ff386b82445ce6f8..14a66e9d2e26ea85a1976fa5d396ef4fad25f85e 100644 (file)
@@ -5,7 +5,6 @@
 config IP1000
        tristate "IP1000 Gigabit Ethernet support"
        depends on PCI
-       select NET_CORE
        select MII
        ---help---
          This driver supports IP1000 gigabit Ethernet cards.
index 068d78151658819ba5a88895df55942d59134e83..1fde90b96685e878b12ffeee0abc8487845cb4da 100644 (file)
@@ -2298,15 +2298,4 @@ static struct pci_driver ipg_pci_driver = {
        .remove         = ipg_remove,
 };
 
-static int __init ipg_init_module(void)
-{
-       return pci_register_driver(&ipg_pci_driver);
-}
-
-static void __exit ipg_exit_module(void)
-{
-       pci_unregister_driver(&ipg_pci_driver);
-}
-
-module_init(ipg_init_module);
-module_exit(ipg_exit_module);
+module_pci_driver(ipg_pci_driver);
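
module_pci_driver() replaces the hand-rolled module_init()/module_exit() pair removed above; the macro generates equivalent init and exit functions that simply call pci_register_driver() and pci_unregister_driver() on the given driver. Typical usage looks like this (example_pci_driver is a placeholder, not the ipg driver):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	.name	= "example_pci",
	/* .id_table, .probe, .remove, ... */
};

/* expands to module init/exit stubs that register/unregister the driver */
module_pci_driver(example_pci_driver);

MODULE_LICENSE("GPL");
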
index 6ce027355fcf721932336aee3fce299f4fe7f716..abb300a3191293387d3250ad4c215a49164011ba 100644 (file)
@@ -195,57 +195,57 @@ enum ipg_regs {
 /* TFD data structure masks. */
 
 /* TFDList, TFC */
-#define        IPG_TFC_RSVD_MASK                       0x0000FFFF9FFFFFFF
-#define        IPG_TFC_FRAMEID                         0x000000000000FFFF
-#define        IPG_TFC_WORDALIGN                       0x0000000000030000
-#define        IPG_TFC_WORDALIGNTODWORD                0x0000000000000000
-#define        IPG_TFC_WORDALIGNTOWORD                 0x0000000000020000
-#define        IPG_TFC_WORDALIGNDISABLED               0x0000000000030000
-#define        IPG_TFC_TCPCHECKSUMENABLE               0x0000000000040000
-#define        IPG_TFC_UDPCHECKSUMENABLE               0x0000000000080000
-#define        IPG_TFC_IPCHECKSUMENABLE                0x0000000000100000
-#define        IPG_TFC_FCSAPPENDDISABLE                0x0000000000200000
-#define        IPG_TFC_TXINDICATE                      0x0000000000400000
-#define        IPG_TFC_TXDMAINDICATE                   0x0000000000800000
-#define        IPG_TFC_FRAGCOUNT                       0x000000000F000000
-#define        IPG_TFC_VLANTAGINSERT                   0x0000000010000000
-#define        IPG_TFC_TFDDONE                         0x0000000080000000
-#define        IPG_TFC_VID                             0x00000FFF00000000
-#define        IPG_TFC_CFI                             0x0000100000000000
-#define        IPG_TFC_USERPRIORITY                    0x0000E00000000000
+#define        IPG_TFC_RSVD_MASK                       0x0000FFFF9FFFFFFFULL
+#define        IPG_TFC_FRAMEID                         0x000000000000FFFFULL
+#define        IPG_TFC_WORDALIGN                       0x0000000000030000ULL
+#define        IPG_TFC_WORDALIGNTODWORD                0x0000000000000000ULL
+#define        IPG_TFC_WORDALIGNTOWORD                 0x0000000000020000ULL
+#define        IPG_TFC_WORDALIGNDISABLED               0x0000000000030000ULL
+#define        IPG_TFC_TCPCHECKSUMENABLE               0x0000000000040000ULL
+#define        IPG_TFC_UDPCHECKSUMENABLE               0x0000000000080000ULL
+#define        IPG_TFC_IPCHECKSUMENABLE                0x0000000000100000ULL
+#define        IPG_TFC_FCSAPPENDDISABLE                0x0000000000200000ULL
+#define        IPG_TFC_TXINDICATE                      0x0000000000400000ULL
+#define        IPG_TFC_TXDMAINDICATE                   0x0000000000800000ULL
+#define        IPG_TFC_FRAGCOUNT                       0x000000000F000000ULL
+#define        IPG_TFC_VLANTAGINSERT                   0x0000000010000000ULL
+#define        IPG_TFC_TFDDONE                         0x0000000080000000ULL
+#define        IPG_TFC_VID                             0x00000FFF00000000ULL
+#define        IPG_TFC_CFI                             0x0000100000000000ULL
+#define        IPG_TFC_USERPRIORITY                    0x0000E00000000000ULL
 
 /* TFDList, FragInfo */
-#define        IPG_TFI_RSVD_MASK                       0xFFFF00FFFFFFFFFF
-#define        IPG_TFI_FRAGADDR                        0x000000FFFFFFFFFF
-#define        IPG_TFI_FRAGLEN                         0xFFFF000000000000LL
+#define        IPG_TFI_RSVD_MASK                       0xFFFF00FFFFFFFFFFULL
+#define        IPG_TFI_FRAGADDR                        0x000000FFFFFFFFFFULL
+#define        IPG_TFI_FRAGLEN                         0xFFFF000000000000ULL
 
 /* RFD data structure masks. */
 
 /* RFDList, RFS */
-#define        IPG_RFS_RSVD_MASK                       0x0000FFFFFFFFFFFF
-#define        IPG_RFS_RXFRAMELEN                      0x000000000000FFFF
-#define        IPG_RFS_RXFIFOOVERRUN                   0x0000000000010000
-#define        IPG_RFS_RXRUNTFRAME                     0x0000000000020000
-#define        IPG_RFS_RXALIGNMENTERROR                0x0000000000040000
-#define        IPG_RFS_RXFCSERROR                      0x0000000000080000
-#define        IPG_RFS_RXOVERSIZEDFRAME                0x0000000000100000
-#define        IPG_RFS_RXLENGTHERROR                   0x0000000000200000
-#define        IPG_RFS_VLANDETECTED                    0x0000000000400000
-#define        IPG_RFS_TCPDETECTED                     0x0000000000800000
-#define        IPG_RFS_TCPERROR                        0x0000000001000000
-#define        IPG_RFS_UDPDETECTED                     0x0000000002000000
-#define        IPG_RFS_UDPERROR                        0x0000000004000000
-#define        IPG_RFS_IPDETECTED                      0x0000000008000000
-#define        IPG_RFS_IPERROR                         0x0000000010000000
-#define        IPG_RFS_FRAMESTART                      0x0000000020000000
-#define        IPG_RFS_FRAMEEND                        0x0000000040000000
-#define        IPG_RFS_RFDDONE                         0x0000000080000000
-#define        IPG_RFS_TCI                             0x0000FFFF00000000
+#define        IPG_RFS_RSVD_MASK                       0x0000FFFFFFFFFFFFULL
+#define        IPG_RFS_RXFRAMELEN                      0x000000000000FFFFULL
+#define        IPG_RFS_RXFIFOOVERRUN                   0x0000000000010000ULL
+#define        IPG_RFS_RXRUNTFRAME                     0x0000000000020000ULL
+#define        IPG_RFS_RXALIGNMENTERROR                0x0000000000040000ULL
+#define        IPG_RFS_RXFCSERROR                      0x0000000000080000ULL
+#define        IPG_RFS_RXOVERSIZEDFRAME                0x0000000000100000ULL
+#define        IPG_RFS_RXLENGTHERROR                   0x0000000000200000ULL
+#define        IPG_RFS_VLANDETECTED                    0x0000000000400000ULL
+#define        IPG_RFS_TCPDETECTED                     0x0000000000800000ULL
+#define        IPG_RFS_TCPERROR                        0x0000000001000000ULL
+#define        IPG_RFS_UDPDETECTED                     0x0000000002000000ULL
+#define        IPG_RFS_UDPERROR                        0x0000000004000000ULL
+#define        IPG_RFS_IPDETECTED                      0x0000000008000000ULL
+#define        IPG_RFS_IPERROR                         0x0000000010000000ULL
+#define        IPG_RFS_FRAMESTART                      0x0000000020000000ULL
+#define        IPG_RFS_FRAMEEND                        0x0000000040000000ULL
+#define        IPG_RFS_RFDDONE                         0x0000000080000000ULL
+#define        IPG_RFS_TCI                             0x0000FFFF00000000ULL
 
 /* RFDList, FragInfo */
-#define        IPG_RFI_RSVD_MASK                       0xFFFF00FFFFFFFFFF
-#define        IPG_RFI_FRAGADDR                        0x000000FFFFFFFFFF
-#define        IPG_RFI_FRAGLEN                         0xFFFF000000000000LL
+#define        IPG_RFI_RSVD_MASK                       0xFFFF00FFFFFFFFFFULL
+#define        IPG_RFI_FRAGADDR                        0x000000FFFFFFFFFFULL
+#define        IPG_RFI_FRAGLEN                         0xFFFF000000000000ULL
 
 /* I/O Register masks. */
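
The ULL suffixes added above make the 64-bit type of these TFD/RFD descriptor masks explicit instead of relying on implicit promotion of large constants, which helps avoid compiler warnings on 32-bit builds and matches how the masks are applied to 64-bit descriptor words. A trivial userspace illustration of masking a fragment-info word with such constants (values and names hypothetical):

#include <stdio.h>
#include <stdint.h>

#define FRAGADDR_MASK	0x000000FFFFFFFFFFULL	/* bits 39:0  */
#define FRAGLEN_MASK	0xFFFF000000000000ULL	/* bits 63:48 */

int main(void)
{
	uint64_t frag_info = 0x004000000ABCDEF0ULL;	/* made-up descriptor word */

	printf("addr=%#llx len=%llu\n",
	       (unsigned long long)(frag_info & FRAGADDR_MASK),
	       (unsigned long long)((frag_info & FRAGLEN_MASK) >> 48));
	return 0;
}
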
 
index 05f7264c51f71899c308b19d616e2a8718de152a..f0e7ed20a750b56d1bc77e6bc9151847ac09ac5b 100644 (file)
@@ -20,7 +20,6 @@ if NET_VENDOR_INTEL
 config E100
        tristate "Intel(R) PRO/100+ support"
        depends on PCI
-       select NET_CORE
        select MII
        ---help---
          This driver supports Intel(R) PRO/100 family of adapters.
index b71c8502a2b347d8c770ac723d659a6115127e35..895450e9bb3cfe81d22f19148b4a52d9fc14352a 100644 (file)
@@ -66,17 +66,17 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
        s32 ret_val;
 
        if (hw->phy.media_type != e1000_media_type_copper) {
-               phy->type       = e1000_phy_none;
+               phy->type = e1000_phy_none;
                return 0;
        } else {
                phy->ops.power_up = e1000_power_up_phy_copper;
                phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
        }
 
-       phy->addr               = 1;
-       phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-       phy->reset_delay_us      = 100;
-       phy->type               = e1000_phy_gg82563;
+       phy->addr = 1;
+       phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+       phy->reset_delay_us = 100;
+       phy->type = e1000_phy_gg82563;
 
        /* This can only be done after all function pointers are setup. */
        ret_val = e1000e_get_phy_id(hw);
@@ -98,19 +98,19 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
        u32 eecd = er32(EECD);
        u16 size;
 
-       nvm->opcode_bits        = 8;
-       nvm->delay_usec  = 1;
+       nvm->opcode_bits = 8;
+       nvm->delay_usec = 1;
        switch (nvm->override) {
        case e1000_nvm_override_spi_large:
-               nvm->page_size    = 32;
+               nvm->page_size = 32;
                nvm->address_bits = 16;
                break;
        case e1000_nvm_override_spi_small:
-               nvm->page_size    = 8;
+               nvm->page_size = 8;
                nvm->address_bits = 8;
                break;
        default:
-               nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+               nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
                nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
                break;
        }
@@ -128,7 +128,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
        /* EEPROM access above 16k is unsupported */
        if (size > 14)
                size = 14;
-       nvm->word_size  = 1 << size;
+       nvm->word_size = 1 << size;
 
        return 0;
 }
@@ -859,7 +859,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
 
        /* Transmit Arbitration Control 0 */
        reg = er32(TARC(0));
-       reg &= ~(0xF << 27); /* 30:27 */
+       reg &= ~(0xF << 27);    /* 30:27 */
        if (hw->phy.media_type != e1000_media_type_copper)
                reg &= ~(1 << 20);
        ew32(TARC(0), reg);
index 7380442a38299b5480c7858ee344a48bc87905b5..4c303e2a7cb3f3aae1ed974d6a066168c7e82c4a 100644 (file)
@@ -77,24 +77,24 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
                return 0;
        }
 
-       phy->addr                        = 1;
-       phy->autoneg_mask                = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-       phy->reset_delay_us              = 100;
+       phy->addr = 1;
+       phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+       phy->reset_delay_us = 100;
 
-       phy->ops.power_up                = e1000_power_up_phy_copper;
-       phy->ops.power_down              = e1000_power_down_phy_copper_82571;
+       phy->ops.power_up = e1000_power_up_phy_copper;
+       phy->ops.power_down = e1000_power_down_phy_copper_82571;
 
        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
-               phy->type                = e1000_phy_igp_2;
+               phy->type = e1000_phy_igp_2;
                break;
        case e1000_82573:
-               phy->type                = e1000_phy_m88;
+               phy->type = e1000_phy_m88;
                break;
        case e1000_82574:
        case e1000_82583:
-               phy->type                = e1000_phy_bm;
+               phy->type = e1000_phy_bm;
                phy->ops.acquire = e1000_get_hw_semaphore_82574;
                phy->ops.release = e1000_put_hw_semaphore_82574;
                phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
@@ -193,7 +193,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
                /* EEPROM access above 16k is unsupported */
                if (size > 14)
                        size = 14;
-               nvm->word_size  = 1 << size;
+               nvm->word_size = 1 << size;
                break;
        }
 
@@ -339,7 +339,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
 static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       static int global_quad_port_a; /* global port a indication */
+       static int global_quad_port_a;  /* global port a indication */
        struct pci_dev *pdev = adapter->pdev;
        int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
        s32 rc;
@@ -1003,8 +1003,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
        default:
                break;
        }
-       if (ret_val)
-               e_dbg("Cannot acquire MDIO ownership\n");
 
        ctrl = er32(CTRL);
 
@@ -1015,7 +1013,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
        switch (hw->mac.type) {
        case e1000_82574:
        case e1000_82583:
-               e1000_put_hw_semaphore_82574(hw);
+               /* Release mutex only if the hw semaphore is acquired */
+               if (!ret_val)
+                       e1000_put_hw_semaphore_82574(hw);
                break;
        default:
                break;
@@ -1178,7 +1178,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 
        /* Transmit Arbitration Control 0 */
        reg = er32(TARC(0));
-       reg &= ~(0xF << 27); /* 30:27 */
+       reg &= ~(0xF << 27);    /* 30:27 */
        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
@@ -1390,7 +1390,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
        ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
        if (ret_val)
                return false;
-       if (receive_errors == E1000_RECEIVE_ERROR_MAX)  {
+       if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
                ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
                if (ret_val)
                        return false;
index 7c8ca658d553d489f37234fe177d25ed870975d0..59c22bf18701bf6505b13c0f16228a43694e2bb7 100644 (file)
@@ -244,7 +244,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
                mac->autoneg = 1;
                adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
                break;
-       case SPEED_1000 + DUPLEX_HALF: /* not supported */
+       case SPEED_1000 + DUPLEX_HALF:  /* not supported */
        default:
                goto err_inval;
        }
@@ -416,7 +416,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data)
 
 static int e1000_get_regs_len(struct net_device __always_unused *netdev)
 {
-#define E1000_REGS_LEN 32 /* overestimate */
+#define E1000_REGS_LEN 32      /* overestimate */
        return E1000_REGS_LEN * sizeof(u32);
 }
 
@@ -433,22 +433,22 @@ static void e1000_get_regs(struct net_device *netdev,
        regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
            adapter->pdev->device;
 
-       regs_buff[0]  = er32(CTRL);
-       regs_buff[1]  = er32(STATUS);
+       regs_buff[0] = er32(CTRL);
+       regs_buff[1] = er32(STATUS);
 
-       regs_buff[2]  = er32(RCTL);
-       regs_buff[3]  = er32(RDLEN(0));
-       regs_buff[4]  = er32(RDH(0));
-       regs_buff[5]  = er32(RDT(0));
-       regs_buff[6]  = er32(RDTR);
+       regs_buff[2] = er32(RCTL);
+       regs_buff[3] = er32(RDLEN(0));
+       regs_buff[4] = er32(RDH(0));
+       regs_buff[5] = er32(RDT(0));
+       regs_buff[6] = er32(RDTR);
 
-       regs_buff[7]  = er32(TCTL);
-       regs_buff[8]  = er32(TDLEN(0));
-       regs_buff[9]  = er32(TDH(0));
+       regs_buff[7] = er32(TCTL);
+       regs_buff[8] = er32(TDLEN(0));
+       regs_buff[9] = er32(TDH(0));
        regs_buff[10] = er32(TDT(0));
        regs_buff[11] = er32(TIDV);
 
-       regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
+       regs_buff[12] = adapter->hw.phy.type;   /* PHY type (IGP=1, M88=0) */
 
        /* ethtool doesn't use anything past this point, so all this
         * code is likely legacy junk for apps that may or may not exist
@@ -1379,7 +1379,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
        if (hw->phy.media_type == e1000_media_type_copper &&
            hw->phy.type == e1000_phy_m88) {
-               ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+               ctrl_reg |= E1000_CTRL_ILOS;    /* Invert Loss of Signal */
        } else {
                /* Set the ILOS bit on the fiber Nic if half duplex link is
                 * detected.
@@ -1613,7 +1613,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
                ew32(TDT(0), k);
                e1e_flush();
                msleep(200);
-               time = jiffies; /* set the start time for the receive */
+               time = jiffies; /* set the start time for the receive */
                good_cnt = 0;
                /* receive the sent packets */
                do {
@@ -1636,11 +1636,11 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
                         */
                } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
                if (good_cnt != 64) {
-                       ret_val = 13; /* ret_val is the same as mis-compare */
+                       ret_val = 13;   /* ret_val is the same as mis-compare */
                        break;
                }
                if (jiffies >= (time + 20)) {
-                       ret_val = 14; /* error code for time out error */
+                       ret_val = 14;   /* error code for time out error */
                        break;
                }
        }
index 84850f7a23e4205650b9cef83f12be762a4feea9..a6f903a9b7731cd3460d5337b98d3ae79a4d3661 100644 (file)
@@ -402,13 +402,13 @@ struct e1000_phy_stats {
 
 struct e1000_host_mng_dhcp_cookie {
        u32 signature;
-       u8  status;
-       u8  reserved0;
+       u8 status;
+       u8 reserved0;
        u16 vlan_id;
        u32 reserved1;
        u16 reserved2;
-       u8  reserved3;
-       u8  checksum;
+       u8 reserved3;
+       u8 checksum;
 };
 
 /* Host Interface "Rev 1" */
@@ -427,8 +427,8 @@ struct e1000_host_command_info {
 
 /* Host Interface "Rev 2" */
 struct e1000_host_mng_command_header {
-       u8  command_id;
-       u8  checksum;
+       u8 command_id;
+       u8 checksum;
        u16 reserved1;
        u16 reserved2;
        u16 command_length;
@@ -549,7 +549,7 @@ struct e1000_mac_info {
        u32 mta_shadow[MAX_MTA_REG];
        u16 rar_entry_count;
 
-       u8  forced_speed_duplex;
+       u8 forced_speed_duplex;
 
        bool adaptive_ifs;
        bool has_fwsm;
@@ -577,7 +577,7 @@ struct e1000_phy_info {
 
        u32 addr;
        u32 id;
-       u32 reset_delay_us; /* in usec */
+       u32 reset_delay_us;     /* in usec */
        u32 revision;
 
        enum e1000_media_type media_type;
@@ -636,11 +636,11 @@ struct e1000_dev_spec_82571 {
 };
 
 struct e1000_dev_spec_80003es2lan {
-       bool  mdic_wa_enable;
+       bool mdic_wa_enable;
 };
 
 struct e1000_shadow_ram {
-       u16  value;
+       u16 value;
        bool modified;
 };
 
@@ -660,17 +660,17 @@ struct e1000_hw {
        void __iomem *hw_addr;
        void __iomem *flash_address;
 
-       struct e1000_mac_info  mac;
-       struct e1000_fc_info   fc;
-       struct e1000_phy_info  phy;
-       struct e1000_nvm_info  nvm;
-       struct e1000_bus_info  bus;
+       struct e1000_mac_info mac;
+       struct e1000_fc_info fc;
+       struct e1000_phy_info phy;
+       struct e1000_nvm_info nvm;
+       struct e1000_bus_info bus;
        struct e1000_host_mng_dhcp_cookie mng_cookie;
 
        union {
-               struct e1000_dev_spec_82571     e82571;
+               struct e1000_dev_spec_82571 e82571;
                struct e1000_dev_spec_80003es2lan e80003es2lan;
-               struct e1000_dev_spec_ich8lan   ich8lan;
+               struct e1000_dev_spec_ich8lan ich8lan;
        } dev_spec;
 };
 
index ad9d8f2dd868778d1b4f8a1c2e3b767add06acd8..9dde390f7e71c34f327f1e1bc213dce5338b917c 100644 (file)
@@ -101,12 +101,12 @@ union ich8_hws_flash_regacc {
 /* ICH Flash Protected Region */
 union ich8_flash_protected_range {
        struct ich8_pr {
-               u32 base:13;     /* 0:12 Protected Range Base */
-               u32 reserved1:2; /* 13:14 Reserved */
-               u32 rpe:1;       /* 15 Read Protection Enable */
-               u32 limit:13;    /* 16:28 Protected Range Limit */
-               u32 reserved2:2; /* 29:30 Reserved */
-               u32 wpe:1;       /* 31 Write Protection Enable */
+               u32 base:13;    /* 0:12 Protected Range Base */
+               u32 reserved1:2;        /* 13:14 Reserved */
+               u32 rpe:1;      /* 15 Read Protection Enable */
+               u32 limit:13;   /* 16:28 Protected Range Limit */
+               u32 reserved2:2;        /* 29:30 Reserved */
+               u32 wpe:1;      /* 31 Write Protection Enable */
        } range;
        u32 regval;
 };
@@ -362,21 +362,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
 
-       phy->addr                     = 1;
-       phy->reset_delay_us           = 100;
-
-       phy->ops.set_page             = e1000_set_page_igp;
-       phy->ops.read_reg             = e1000_read_phy_reg_hv;
-       phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
-       phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
-       phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
-       phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
-       phy->ops.write_reg            = e1000_write_phy_reg_hv;
-       phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
-       phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
-       phy->ops.power_up             = e1000_power_up_phy_copper;
-       phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
-       phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+       phy->addr = 1;
+       phy->reset_delay_us = 100;
+
+       phy->ops.set_page = e1000_set_page_igp;
+       phy->ops.read_reg = e1000_read_phy_reg_hv;
+       phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
+       phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
+       phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+       phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
+       phy->ops.write_reg = e1000_write_phy_reg_hv;
+       phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
+       phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
+       phy->ops.power_up = e1000_power_up_phy_copper;
+       phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+       phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
        phy->id = e1000_phy_unknown;
 
@@ -445,11 +445,11 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
        s32 ret_val;
        u16 i = 0;
 
-       phy->addr                       = 1;
-       phy->reset_delay_us             = 100;
+       phy->addr = 1;
+       phy->reset_delay_us = 100;
 
-       phy->ops.power_up               = e1000_power_up_phy_copper;
-       phy->ops.power_down             = e1000_power_down_phy_copper_ich8lan;
+       phy->ops.power_up = e1000_power_up_phy_copper;
+       phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 
        /* We may need to do this twice - once for IGP and if that fails,
         * we'll set BM func pointers and try again
@@ -457,7 +457,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
        ret_val = e1000e_determine_phy_address(hw);
        if (ret_val) {
                phy->ops.write_reg = e1000e_write_phy_reg_bm;
-               phy->ops.read_reg  = e1000e_read_phy_reg_bm;
+               phy->ops.read_reg = e1000e_read_phy_reg_bm;
                ret_val = e1000e_determine_phy_address(hw);
                if (ret_val) {
                        e_dbg("Cannot determine PHY addr. Erroring out\n");
@@ -560,7 +560,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
        /* Clear shadow ram */
        for (i = 0; i < nvm->word_size; i++) {
                dev_spec->shadow_ram[i].modified = false;
-               dev_spec->shadow_ram[i].value    = 0xFFFF;
+               dev_spec->shadow_ram[i].value = 0xFFFF;
        }
 
        return 0;
@@ -1012,7 +1012,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
        if (!link)
-               return 0; /* No link detected */
+               return 0;       /* No link detected */
 
        mac->get_link_status = false;
 
@@ -2816,7 +2816,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
        s32 ret_val = -E1000_ERR_NVM;
        u8 count = 0;
 
-       if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+       if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
                return -E1000_ERR_NVM;
 
        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
@@ -2939,7 +2939,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
         * write to bank 0 etc.  We also need to erase the segment that
         * is going to be written
         */
-       ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+       ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
        if (ret_val) {
                e_dbg("Could not detect valid bank, assuming bank 0\n");
                bank = 0;
@@ -4073,7 +4073,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
 {
        u32 reg;
        u16 data;
-       u8  retry = 0;
+       u8 retry = 0;
 
        if (hw->phy.type != e1000_phy_igp_3)
                return;
index a27e3bcc3249f129c71930d5f1966b4ccf732796..77f81cbb601a4b248f4018f76a3d6efbb94726bc 100644 (file)
@@ -1196,7 +1196,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                bool cleaned = false;
-               rmb(); /* read buffer_info after eop_desc */
+               rmb();          /* read buffer_info after eop_desc */
                for (; !cleaned; count++) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
@@ -1385,7 +1385,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 
                                skb_put(skb, l1);
                                goto copydone;
-                       } /* if */
+                       }       /* if */
                }
 
                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -1800,7 +1800,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
        u32 rctl, icr = er32(ICR);
 
        if (!icr || test_bit(__E1000_DOWN, &adapter->state))
-               return IRQ_NONE;  /* Not our interrupt */
+               return IRQ_NONE;        /* Not our interrupt */
 
        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
         * not set, then the adapter didn't send an interrupt
@@ -2487,7 +2487,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
                else if ((packets < 5) && (bytes > 512))
                        retval = low_latency;
                break;
-       case low_latency:  /* 50 usec aka 20000 ints/s */
+       case low_latency:       /* 50 usec aka 20000 ints/s */
                if (bytes > 10000) {
                        /* this if handles the TSO accounting */
                        if (bytes / packets > 8000)
@@ -2502,7 +2502,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
                        retval = lowest_latency;
                }
                break;
-       case bulk_latency: /* 250 usec aka 4000 ints/s */
+       case bulk_latency:      /* 250 usec aka 4000 ints/s */
                if (bytes > 25000) {
                        if (packets > 35)
                                retval = low_latency;
@@ -2554,7 +2554,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
                new_itr = 70000;
                break;
        case low_latency:
-               new_itr = 20000; /* aka hwitr = ~200 */
+               new_itr = 20000;        /* aka hwitr = ~200 */
                break;
        case bulk_latency:
                new_itr = 4000;
@@ -2673,7 +2673,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
 }
 
 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
-                                __be16 proto, u16 vid)
+                                __always_unused __be16 proto, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2699,7 +2699,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 }
 
 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
-                                 __be16 proto, u16 vid)
+                                 __always_unused __be16 proto, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -3104,13 +3104,13 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                /* UPE and MPE will be handled by normal PROMISC logic
                 * in e1000e_set_rx_mode
                 */
-               rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
-                        E1000_RCTL_BAM | /* RX All Bcast Pkts */
-                        E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+               rctl |= (E1000_RCTL_SBP |       /* Receive bad packets */
+                        E1000_RCTL_BAM |       /* RX All Bcast Pkts */
+                        E1000_RCTL_PMCF);      /* RX All MAC Ctrl Pkts */
 
-               rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
-                         E1000_RCTL_DPF | /* Allow filtered pause */
-                         E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+               rctl &= ~(E1000_RCTL_VFE |      /* Disable VLAN filter */
+                         E1000_RCTL_DPF |      /* Allow filtered pause */
+                         E1000_RCTL_CFIEN);    /* Dis VLAN CFIEN Filter */
                /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
                 * and that breaks VLANs.
                 */
@@ -3799,7 +3799,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
                hwm = min(((pba << 10) * 9 / 10),
                          ((pba << 10) - adapter->max_frame_size));
 
-               fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+               fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
                fc->low_water = fc->high_water - 8;
                break;
        case e1000_pchlan:
@@ -3808,10 +3808,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
                 */
                if (adapter->netdev->mtu > ETH_DATA_LEN) {
                        fc->high_water = 0x3500;
-                       fc->low_water  = 0x1500;
+                       fc->low_water = 0x1500;
                } else {
                        fc->high_water = 0x5000;
-                       fc->low_water  = 0x3000;
+                       fc->low_water = 0x3000;
                }
                fc->refresh_time = 0x1000;
                break;
@@ -4581,7 +4581,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.crcerrs += er32(CRCERRS);
        adapter->stats.gprc += er32(GPRC);
        adapter->stats.gorc += er32(GORCL);
-       er32(GORCH); /* Clear gorc */
+       er32(GORCH);            /* Clear gorc */
        adapter->stats.bprc += er32(BPRC);
        adapter->stats.mprc += er32(MPRC);
        adapter->stats.roc += er32(ROC);
@@ -4614,7 +4614,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.xofftxc += er32(XOFFTXC);
        adapter->stats.gptc += er32(GPTC);
        adapter->stats.gotc += er32(GOTCL);
-       er32(GOTCH); /* Clear gotc */
+       er32(GOTCH);            /* Clear gotc */
        adapter->stats.rnbc += er32(RNBC);
        adapter->stats.ruc += er32(RUC);
 
@@ -5106,13 +5106,13 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
        buffer_info = &tx_ring->buffer_info[i];
 
-       context_desc->lower_setup.ip_fields.ipcss  = ipcss;
-       context_desc->lower_setup.ip_fields.ipcso  = ipcso;
-       context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+       context_desc->lower_setup.ip_fields.ipcss = ipcss;
+       context_desc->lower_setup.ip_fields.ipcso = ipcso;
+       context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
        context_desc->upper_setup.tcp_fields.tucss = tucss;
        context_desc->upper_setup.tcp_fields.tucso = tucso;
        context_desc->upper_setup.tcp_fields.tucse = 0;
-       context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+       context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
@@ -5363,7 +5363,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
                                    struct sk_buff *skb)
 {
-       struct e1000_hw *hw =  &adapter->hw;
+       struct e1000_hw *hw = &adapter->hw;
        u16 length, offset;
 
        if (vlan_tx_tag_present(skb) &&
@@ -6259,7 +6259,7 @@ static void e1000_netpoll(struct net_device *netdev)
                e1000_intr_msi(adapter->pdev->irq, netdev);
                enable_irq(adapter->pdev->irq);
                break;
-       default: /* E1000E_INT_MODE_LEGACY */
+       default:                /* E1000E_INT_MODE_LEGACY */
                disable_irq(adapter->pdev->irq);
                e1000_intr(adapter->pdev->irq, netdev);
                enable_irq(adapter->pdev->irq);
@@ -6589,9 +6589,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
 
        /* construct the net_device struct */
-       netdev->netdev_ops              = &e1000e_netdev_ops;
+       netdev->netdev_ops = &e1000e_netdev_ops;
        e1000e_set_ethtool_ops(netdev);
-       netdev->watchdog_timeo          = 5 * HZ;
+       netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
        strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
@@ -7034,7 +7034,6 @@ static void __exit e1000_exit_module(void)
 }
 module_exit(e1000_exit_module);
 
-
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 MODULE_LICENSE("GPL");
index 44ddc0a0ee0e4287ee17b6a389904aaedb9b226a..d70a03906ac0a0cecfb3f078fa584e376eae061a 100644 (file)
@@ -117,7 +117,6 @@ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
        u16 data;
 
        eecd = er32(EECD);
-
        eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
        data = 0;
 
index 59c76a6815a0854edd9b38067b82dfcede1c9d10..da2be59505c06226b4cd79f9e8cc120945ca1d17 100644 (file)
@@ -1583,13 +1583,13 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
        case e1000_phy_gg82563:
        case e1000_phy_bm:
        case e1000_phy_82578:
-               offset  = M88E1000_PHY_SPEC_STATUS;
-               mask    = M88E1000_PSSR_DOWNSHIFT;
+               offset = M88E1000_PHY_SPEC_STATUS;
+               mask = M88E1000_PSSR_DOWNSHIFT;
                break;
        case e1000_phy_igp_2:
        case e1000_phy_igp_3:
-               offset  = IGP01E1000_PHY_LINK_HEALTH;
-               mask    = IGP01E1000_PLHR_SS_DOWNGRADE;
+               offset = IGP01E1000_PHY_LINK_HEALTH;
+               mask = IGP01E1000_PLHR_SS_DOWNGRADE;
                break;
        default:
                /* speed downshift not supported */
@@ -1653,14 +1653,14 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 
        if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
            IGP01E1000_PSSR_SPEED_1000MBPS) {
-               offset  = IGP01E1000_PHY_PCS_INIT_REG;
-               mask    = IGP01E1000_PHY_POLARITY_MASK;
+               offset = IGP01E1000_PHY_PCS_INIT_REG;
+               mask = IGP01E1000_PHY_POLARITY_MASK;
        } else {
                /* This really only applies to 10Mbps since
                 * there is no polarity for 100Mbps (always 0).
                 */
-               offset  = IGP01E1000_PHY_PORT_STATUS;
-               mask    = IGP01E1000_PSSR_POLARITY_REVERSED;
+               offset = IGP01E1000_PHY_PORT_STATUS;
+               mask = IGP01E1000_PSSR_POLARITY_REVERSED;
        }
 
        ret_val = e1e_rphy(hw, offset, &data);
@@ -1900,7 +1900,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
 s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
 {
        struct e1000_phy_info *phy = &hw->phy;
-       s32  ret_val;
+       s32 ret_val;
        u16 phy_data;
        bool link;
 
@@ -2253,7 +2253,7 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
        case M88E1011_I_PHY_ID:
                phy_type = e1000_phy_m88;
                break;
-       case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+       case IGP01E1000_I_PHY_ID:       /* IGP 1 & 2 share this */
                phy_type = e1000_phy_igp_2;
                break;
        case GG82563_E_PHY_ID:
@@ -2317,7 +2317,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
                        /* If phy_type is valid, break - we found our
                         * PHY address
                         */
-                       if (phy_type  != e1000_phy_unknown)
+                       if (phy_type != e1000_phy_unknown)
                                return 0;
 
                        usleep_range(1000, 2000);
index ff6a17cb136280ec5547b128c74126e757f6bd52..f21a91a299a2200995be96c6a92dd05d80060c9b 100644 (file)
@@ -401,12 +401,82 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
        return 0;
 }
 
+/**
+ *  igb_set_sfp_media_type_82575 - derives SFP module media type.
+ *  @hw: pointer to the HW structure
+ *
+ *  The media type is chosen based on the SFP module
+ *  compatibility flags retrieved from the SFP ID EEPROM.
+ **/
+static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_ERR_CONFIG;
+       u32 ctrl_ext = 0;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+       struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
+       u8 tranceiver_type = 0;
+       s32 timeout = 3;
+
+       /* Turn I2C interface ON and power on sfp cage */
+       ctrl_ext = rd32(E1000_CTRL_EXT);
+       ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+       wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+       wrfl();
+
+       /* Read SFP module data */
+       while (timeout) {
+               ret_val = igb_read_sfp_data_byte(hw,
+                       E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+                       &tranceiver_type);
+               if (ret_val == 0)
+                       break;
+               msleep(100);
+               timeout--;
+       }
+       if (ret_val != 0)
+               goto out;
+
+       ret_val = igb_read_sfp_data_byte(hw,
+                       E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+                       (u8 *)eth_flags);
+       if (ret_val != 0)
+               goto out;
+
+       /* Check if there is some SFP module plugged and powered */
+       if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+           (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+               dev_spec->module_plugged = true;
+               if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+                       hw->phy.media_type = e1000_media_type_internal_serdes;
+               } else if (eth_flags->e100_base_fx) {
+                       dev_spec->sgmii_active = true;
+                       hw->phy.media_type = e1000_media_type_internal_serdes;
+               } else if (eth_flags->e1000_base_t) {
+                       dev_spec->sgmii_active = true;
+                       hw->phy.media_type = e1000_media_type_copper;
+               } else {
+                       hw->phy.media_type = e1000_media_type_unknown;
+                       hw_dbg("PHY module has not been recognized\n");
+                       goto out;
+               }
+       } else {
+               hw->phy.media_type = e1000_media_type_unknown;
+       }
+       ret_val = 0;
+out:
+       /* Restore I2C interface setting */
+       wr32(E1000_CTRL_EXT, ctrl_ext);
+       return ret_val;
+}
+
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
        s32 ret_val;
        u32 ctrl_ext = 0;
+       u32 link_mode = 0;
 
        switch (hw->device_id) {
        case E1000_DEV_ID_82575EB_COPPER:
@@ -470,15 +540,55 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
         */
        hw->phy.media_type = e1000_media_type_copper;
        dev_spec->sgmii_active = false;
+       dev_spec->module_plugged = false;
 
        ctrl_ext = rd32(E1000_CTRL_EXT);
-       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
-       case E1000_CTRL_EXT_LINK_MODE_SGMII:
-               dev_spec->sgmii_active = true;
-               break;
+
+       link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+       switch (link_mode) {
        case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
-       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
                hw->phy.media_type = e1000_media_type_internal_serdes;
+               break;
+       case E1000_CTRL_EXT_LINK_MODE_SGMII:
+               /* Get phy control interface type set (MDIO vs. I2C)*/
+               if (igb_sgmii_uses_mdio_82575(hw)) {
+                       hw->phy.media_type = e1000_media_type_copper;
+                       dev_spec->sgmii_active = true;
+                       break;
+               }
+               /* fall through for I2C based SGMII */
+       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+               /* read media type from SFP EEPROM */
+               ret_val = igb_set_sfp_media_type_82575(hw);
+               if ((ret_val != 0) ||
+                   (hw->phy.media_type == e1000_media_type_unknown)) {
+                       /* If media type was not identified then return media
+                        * type defined by the CTRL_EXT settings.
+                        */
+                       hw->phy.media_type = e1000_media_type_internal_serdes;
+
+                       if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+                               hw->phy.media_type = e1000_media_type_copper;
+                               dev_spec->sgmii_active = true;
+                       }
+
+                       break;
+               }
+
+               /* do not change link mode for 100BaseFX */
+               if (dev_spec->eth_flags.e100_base_fx)
+                       break;
+
+               /* change current link mode setting */
+               ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+               if (hw->phy.media_type == e1000_media_type_copper)
+                       ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+               else
+                       ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+               wr32(E1000_CTRL_EXT, ctrl_ext);
+
                break;
        default:
                break;
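
For reference, a minimal standalone sketch of the media-type decision that the igb_set_sfp_media_type_82575() kernel-doc above describes: the SFP identifier byte at EEPROM offset 0x00 (0x03 = SFP, 0x02 = SFF) and the 1 Gb Ethernet compliance flags at offset 0x06 choose between internal SerDes and copper/SGMII. The enum, struct, and main() harness below are illustrative only, not driver code.

/*
 * Hypothetical sketch mirroring the decision tree in the patch above;
 * names and types here are not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

enum media_type { MEDIA_UNKNOWN, MEDIA_COPPER, MEDIA_INTERNAL_SERDES };

struct sfp_eth_flags {          /* byte read from EEPROM offset 0x06 */
	unsigned base_sx : 1;   /* 1000BASE-SX */
	unsigned base_lx : 1;   /* 1000BASE-LX */
	unsigned base_cx : 1;
	unsigned base_t  : 1;   /* 1000BASE-T, i.e. SGMII to an external PHY */
	unsigned e100_fx : 1;   /* 100BASE-FX */
};

static enum media_type classify_sfp(unsigned char identifier,
				    struct sfp_eth_flags f, bool *sgmii)
{
	*sgmii = false;
	if (identifier != 0x03 /* SFP */ && identifier != 0x02 /* SFF */)
		return MEDIA_UNKNOWN;           /* nothing usable plugged in */
	if (f.base_lx || f.base_sx)
		return MEDIA_INTERNAL_SERDES;   /* optical 1000BASE-SX/LX */
	if (f.e100_fx) {
		*sgmii = true;                  /* 100BASE-FX over SGMII */
		return MEDIA_INTERNAL_SERDES;
	}
	if (f.base_t) {
		*sgmii = true;                  /* copper module, SGMII link */
		return MEDIA_COPPER;
	}
	return MEDIA_UNKNOWN;
}

int main(void)
{
	bool sgmii;
	struct sfp_eth_flags lx = { .base_lx = 1 };

	/* An LX module classifies as internal SerDes, no SGMII */
	printf("LX module -> %d (sgmii=%d)\n",
	       classify_sfp(0x03, lx, &sgmii), sgmii);
	return 0;
}
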
index 31a0f82cc650b9ae1eca5940d2f939632c337dcc..aa201abb8ad2809aeadbfbea73f00bc4f913e132 100644 (file)
 /* Clear Interrupt timers after IMS clear */
 /* packet buffer parity error detection enabled */
 /* descriptor FIFO parity error detection enable */
-#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
-#define E1000_I2CCMD_REG_ADDR_SHIFT   16
-#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
-#define E1000_I2CCMD_OPCODE_READ      0x08000000
-#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
-#define E1000_I2CCMD_READY            0x20000000
-#define E1000_I2CCMD_ERROR            0x80000000
-#define E1000_MAX_SGMII_PHY_REG_ADDR  255
-#define E1000_I2CCMD_PHY_TIMEOUT      200
-#define E1000_IVAR_VALID              0x80
-#define E1000_GPIE_NSICR              0x00000001
-#define E1000_GPIE_MSIX_MODE          0x00000010
-#define E1000_GPIE_EIAME              0x40000000
-#define E1000_GPIE_PBA                0x80000000
+#define E1000_CTRL_EXT_PBA_CLR         0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT    16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT    24
+#define E1000_I2CCMD_OPCODE_READ       0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE      0x00000000
+#define E1000_I2CCMD_READY             0x20000000
+#define E1000_I2CCMD_ERROR             0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a)  (0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a)  (0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR   255
+#define E1000_I2CCMD_PHY_TIMEOUT       200
+#define E1000_IVAR_VALID               0x80
+#define E1000_GPIE_NSICR               0x00000001
+#define E1000_GPIE_MSIX_MODE           0x00000010
+#define E1000_GPIE_EIAME               0x40000000
+#define E1000_GPIE_PBA                 0x80000000
 
 /* Receive Descriptor bit definitions */
 #define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
 #define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
 
 /* LED Control */
-#define E1000_LEDCTL_LED0_MODE_SHIFT      0
-#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED0_MODE_SHIFT   0
+#define E1000_LEDCTL_LED0_BLINK                0x00000080
+#define E1000_LEDCTL_LED0_MODE_MASK    0x0000000F
+#define E1000_LEDCTL_LED0_IVRT         0x00000040
 
 #define E1000_LEDCTL_MODE_LED_ON        0xE
 #define E1000_LEDCTL_MODE_LED_OFF       0xF
index 488abb24a54f9f5d3cdb91e2df45f464a03ecdea..94d7866b9c2086b4d0919711a7b6eec2f889bf20 100644 (file)
@@ -528,6 +528,8 @@ struct e1000_dev_spec_82575 {
        bool global_device_reset;
        bool eee_disable;
        bool clear_semaphore_once;
+       struct e1000_sfp_flags eth_flags;
+       bool module_plugged;
 };
 
 struct e1000_hw {
index bfc08e05c90741849d3445cbca337f98d466db2d..5caa332e7556628806491b186ada8e43ec88abf0 100644 (file)
@@ -82,11 +82,11 @@ enum E1000_INVM_STRUCTURE_TYPE {
 #define E1000_INVM_MAJOR_SHIFT         4
 
 #define ID_LED_DEFAULT_I210            ((ID_LED_OFF1_ON2  << 8) | \
-                                        (ID_LED_OFF1_OFF2 <<  4) | \
-                                        (ID_LED_DEF1_DEF2))
+                                        (ID_LED_DEF1_DEF2 <<  4) | \
+                                        (ID_LED_OFF1_OFF2))
 #define ID_LED_DEFAULT_I210_SERDES     ((ID_LED_DEF1_DEF2 << 8) | \
                                         (ID_LED_DEF1_DEF2 <<  4) | \
-                                        (ID_LED_DEF1_DEF2))
+                                        (ID_LED_OFF1_ON2))
 
 /* NVM offset defaults for i211 device */
 #define NVM_INIT_CTRL_2_DEFAULT_I211   0X7243
index 2559d70a2321b717b05b167a2c1c3a5b4f9f44a6..bab556a47fcc48cdb7d90a044f582ef18c335be0 100644 (file)
@@ -1332,7 +1332,13 @@ s32 igb_id_led_init(struct e1000_hw *hw)
        u16 data, i, temp;
        const u16 led_mask = 0x0F;
 
-       ret_val = igb_valid_led_default(hw, &data);
+	/* i210 and i211 devices have a different LED mechanism */
+       if ((hw->mac.type == e1000_i210) ||
+           (hw->mac.type == e1000_i211))
+               ret_val = igb_valid_led_default_i210(hw, &data);
+       else
+               ret_val = igb_valid_led_default(hw, &data);
+
        if (ret_val)
                goto out;
 
@@ -1406,15 +1412,34 @@ s32 igb_blink_led(struct e1000_hw *hw)
        u32 ledctl_blink = 0;
        u32 i;
 
-       /* set the blink bit for each LED that's "on" (0x0E)
-        * in ledctl_mode2
-        */
-       ledctl_blink = hw->mac.ledctl_mode2;
-       for (i = 0; i < 4; i++)
-               if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
-                   E1000_LEDCTL_MODE_LED_ON)
-                       ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
-                                        (i * 8));
+       if (hw->phy.media_type == e1000_media_type_fiber) {
+               /* always blink LED0 for PCI-E fiber */
+               ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+       } else {
+               /* Set the blink bit for each LED that's "on" (0x0E)
+                * (or "off" if inverted) in ledctl_mode2.  The blink
+                * logic in hardware only works when mode is set to "on"
+                * so it must be changed accordingly when the mode is
+                * "off" and inverted.
+                */
+               ledctl_blink = hw->mac.ledctl_mode2;
+               for (i = 0; i < 32; i += 8) {
+                       u32 mode = (hw->mac.ledctl_mode2 >> i) &
+                           E1000_LEDCTL_LED0_MODE_MASK;
+                       u32 led_default = hw->mac.ledctl_default >> i;
+
+                       if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+                            (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+                           ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+                            (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+                               ledctl_blink &=
+                                   ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+                               ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+                                                E1000_LEDCTL_MODE_LED_ON) << i;
+                       }
+               }
+       }
 
        wr32(E1000_LEDCTL, ledctl_blink);
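
A standalone sketch of the per-LED blink computation that the reworked igb_blink_led() loop above performs for non-fiber ports, assuming the LEDCTL layout given by the new defines (8 bits per LED: bit 7 BLINK, bit 6 IVRT, bits 3:0 mode, 0xE = on, 0xF = off). The helper and the test values in main() are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

#define LED_MODE_MASK  0x0F
#define LED_MODE_ON    0x0E
#define LED_MODE_OFF   0x0F
#define LED_IVRT       0x40
#define LED_BLINK      0x80

static uint32_t compute_blink(uint32_t ledctl_mode2, uint32_t ledctl_default)
{
	uint32_t blink = ledctl_mode2;
	unsigned i;

	for (i = 0; i < 32; i += 8) {
		uint32_t mode = (ledctl_mode2 >> i) & LED_MODE_MASK;
		uint32_t def  = ledctl_default >> i;
		int inverted  = def & LED_IVRT;

		/* Hardware only blinks an LED whose mode is "on", so an
		 * inverted LED programmed "off" must be flipped as well.
		 */
		if ((!inverted && mode == LED_MODE_ON) ||
		    (inverted && mode == LED_MODE_OFF)) {
			blink &= ~((uint32_t)LED_MODE_MASK << i);
			blink |= (uint32_t)(LED_BLINK | LED_MODE_ON) << i;
		}
	}
	return blink;
}

int main(void)
{
	/* LED0 "on", LED1 "off" with IVRT set in the default value:
	 * both fields end up as 0x8E (blink + on), i.e. 0x00008e8e.
	 */
	printf("0x%08x\n", compute_blink(0x00000F0E, 0x00004000));
	return 0;
}
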
 
index 115b0da6e01337d528f8b59dd9a02a097b15d2b1..1d6a401cc5d4ba254cd1ca8cc133450bbf8a625c 100644 (file)
@@ -340,6 +340,130 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
        return 0;
 }
 
+/**
+ *  igb_read_sfp_data_byte - Reads SFP module data.
+ *  @hw: pointer to the HW structure
+ *  @offset: byte location offset to be read
+ *  @data: read data buffer pointer
+ *
+ *  Reads one byte of SFP module data stored in the
+ *  SFP module's EEPROM or in its diagnostic area.
+ *  Function should be called with
+ *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ *  E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
+ *  access
+ **/
+s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+       u32 i = 0;
+       u32 i2ccmd = 0;
+       u32 data_local = 0;
+
+       if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+               hw_dbg("I2CCMD command address exceeds upper limit\n");
+               return -E1000_ERR_PHY;
+       }
+
+	/* Set up the Op-code and EEPROM Address in the I2CCMD
+        * register. The MAC will take care of interfacing with the
+        * EEPROM to retrieve the desired data.
+        */
+       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+                 E1000_I2CCMD_OPCODE_READ);
+
+       wr32(E1000_I2CCMD, i2ccmd);
+
+       /* Poll the ready bit to see if the I2C read completed */
+       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+               udelay(50);
+               data_local = rd32(E1000_I2CCMD);
+               if (data_local & E1000_I2CCMD_READY)
+                       break;
+       }
+       if (!(data_local & E1000_I2CCMD_READY)) {
+               hw_dbg("I2CCMD Read did not complete\n");
+               return -E1000_ERR_PHY;
+       }
+       if (data_local & E1000_I2CCMD_ERROR) {
+               hw_dbg("I2CCMD Error bit set\n");
+               return -E1000_ERR_PHY;
+       }
+       *data = (u8) data_local & 0xFF;
+
+       return 0;
+}
+
+/**
+ *  e1000_write_sfp_data_byte - Writes SFP module data.
+ *  @hw: pointer to the HW structure
+ *  @offset: byte location offset to write to
+ *  @data: data to write
+ *
+ *  Writes one byte of SFP module data stored in the
+ *  SFP module's EEPROM or in its diagnostic area.
+ *  Function should be called with
+ *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ *  E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
+ *  access
+ **/
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
+{
+       u32 i = 0;
+       u32 i2ccmd = 0;
+       u32 data_local = 0;
+
+       if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+               hw_dbg("I2CCMD command address exceeds upper limit\n");
+               return -E1000_ERR_PHY;
+       }
+	/* The programming interface is 16 bits wide,
+	 * so we need to read the whole word first,
+	 * then update the appropriate byte lane and write
+	 * the updated word back.
+	 */
+	/* Set up the Op-code and EEPROM Address in the I2CCMD
+	 * register. The MAC will take care of interfacing
+	 * with an EEPROM to write the data given.
+        */
+       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+                 E1000_I2CCMD_OPCODE_READ);
+       /* Set a command to read single word */
+       wr32(E1000_I2CCMD, i2ccmd);
+       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+               udelay(50);
+		/* Poll the ready bit to see if the last
+		 * launched I2C operation completed
+                */
+               i2ccmd = rd32(E1000_I2CCMD);
+               if (i2ccmd & E1000_I2CCMD_READY) {
+                       /* Check if this is READ or WRITE phase */
+                       if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
+                           E1000_I2CCMD_OPCODE_READ) {
+                               /* Write the selected byte
+                                * lane and update whole word
+                                */
+                               data_local = i2ccmd & 0xFF00;
+                               data_local |= data;
+                               i2ccmd = ((offset <<
+                                       E1000_I2CCMD_REG_ADDR_SHIFT) |
+                                       E1000_I2CCMD_OPCODE_WRITE | data_local);
+                               wr32(E1000_I2CCMD, i2ccmd);
+                       } else {
+                               break;
+                       }
+               }
+       }
+       if (!(i2ccmd & E1000_I2CCMD_READY)) {
+               hw_dbg("I2CCMD Write did not complete\n");
+               return -E1000_ERR_PHY;
+       }
+       if (i2ccmd & E1000_I2CCMD_ERROR) {
+               hw_dbg("I2CCMD Error bit set\n");
+               return -E1000_ERR_PHY;
+       }
+       return 0;
+}
+
 /**
  *  igb_read_phy_reg_igp - Read igp PHY register
  *  @hw: pointer to the HW structure
index 784fd1c40989fc86bbdb5d750bc61f5cd40da187..6a0873f2095a49affef31c68ced1dd0d785aa265 100644 (file)
@@ -69,6 +69,8 @@ s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32  e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
 s32  igb_copper_link_setup_82580(struct e1000_hw *hw);
 s32  igb_get_phy_info_82580(struct e1000_hw *hw);
 s32  igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
@@ -157,4 +159,22 @@ s32  igb_check_polarity_m88(struct e1000_hw *hw);
 #define GS40G_CS_POWER_DOWN            0x0002
 #define GS40G_LINE_LB                  0x4000
 
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET    0x00
+#define E1000_SFF_IDENTIFIER_SFF       0x02
+#define E1000_SFF_IDENTIFIER_SFP       0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET     0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct e1000_sfp_flags {
+       u8 e1000_base_sx:1;
+       u8 e1000_base_lx:1;
+       u8 e1000_base_cx:1;
+       u8 e1000_base_t:1;
+       u8 e100_base_lx:1;
+       u8 e100_base_fx:1;
+       u8 e10_base_bx10:1;
+       u8 e10_base_px:1;
+};
+
 #endif
index 9d6c075e232d9c5c2885cebcfa58c297e03b049e..15ea8dc9dad3dc1c5c86e6682882802da574d9d3 100644 (file)
@@ -322,11 +322,6 @@ static inline int igb_desc_unused(struct igb_ring *ring)
        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
 }
 
-struct igb_i2c_client_list {
-       struct i2c_client *client;
-       struct igb_i2c_client_list *next;
-};
-
 #ifdef CONFIG_IGB_HWMON
 
 #define IGB_HWMON_TYPE_LOC     0
@@ -514,13 +509,18 @@ extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
                                unsigned char *va,
                                struct sk_buff *skb);
-static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
                                       union e1000_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
 {
        if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
            !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
-               igb_ptp_rx_rgtstamp(q_vector, skb);
+               igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
+       /* Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for the error case of a latched timestamp on a dropped packet.
+        */
+       rx_ring->last_rx_timestamp = jiffies;
 }
 
 extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
index 7876240fa74e27c16e759168ed662750aae4c66a..85fe7b52f435cf9444cab5d1c466e9e604e3f40b 100644 (file)
@@ -142,6 +142,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+       struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
        u32 status;
 
        if (hw->phy.media_type == e1000_media_type_copper) {
@@ -162,49 +164,26 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        ecmd->advertising |= hw->phy.autoneg_advertised;
                }
 
-               if (hw->mac.autoneg != 1)
-                       ecmd->advertising &= ~(ADVERTISED_Pause |
-                                              ADVERTISED_Asym_Pause);
-
-               if (hw->fc.requested_mode == e1000_fc_full)
-                       ecmd->advertising |= ADVERTISED_Pause;
-               else if (hw->fc.requested_mode == e1000_fc_rx_pause)
-                       ecmd->advertising |= (ADVERTISED_Pause |
-                                             ADVERTISED_Asym_Pause);
-               else if (hw->fc.requested_mode == e1000_fc_tx_pause)
-                       ecmd->advertising |=  ADVERTISED_Asym_Pause;
-               else
-                       ecmd->advertising &= ~(ADVERTISED_Pause |
-                                              ADVERTISED_Asym_Pause);
-
                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy.addr;
                ecmd->transceiver = XCVR_INTERNAL;
        } else {
-               ecmd->supported = (SUPPORTED_1000baseT_Full |
-                                  SUPPORTED_100baseT_Full |
-                                  SUPPORTED_FIBRE |
+               ecmd->supported = (SUPPORTED_FIBRE |
                                   SUPPORTED_Autoneg |
                                   SUPPORTED_Pause);
-               if (hw->mac.type == e1000_i354)
-                               ecmd->supported |= SUPPORTED_2500baseX_Full;
-
                ecmd->advertising = ADVERTISED_FIBRE;
-
-               switch (adapter->link_speed) {
-               case SPEED_2500:
-                       ecmd->advertising = ADVERTISED_2500baseX_Full;
-                       break;
-               case SPEED_1000:
-                       ecmd->advertising = ADVERTISED_1000baseT_Full;
-                       break;
-               case SPEED_100:
-                       ecmd->advertising = ADVERTISED_100baseT_Full;
-                       break;
-               default:
-                       break;
+               if (hw->mac.type == e1000_i354) {
+                       ecmd->supported |= SUPPORTED_2500baseX_Full;
+                       ecmd->advertising |= ADVERTISED_2500baseX_Full;
+               }
+               if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
+                       ecmd->supported |= SUPPORTED_1000baseT_Full;
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               }
+               if (eth_flags->e100_base_fx) {
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
                }
-
                if (hw->mac.autoneg == 1)
                        ecmd->advertising |= ADVERTISED_Autoneg;
 
@@ -212,6 +191,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                ecmd->transceiver = XCVR_EXTERNAL;
        }
 
+       if (hw->mac.autoneg != 1)
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
+
+       if (hw->fc.requested_mode == e1000_fc_full)
+               ecmd->advertising |= ADVERTISED_Pause;
+       else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+               ecmd->advertising |= (ADVERTISED_Pause |
+                                     ADVERTISED_Asym_Pause);
+       else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+               ecmd->advertising |=  ADVERTISED_Asym_Pause;
+       else
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
+
        status = rd32(E1000_STATUS);
 
        if (status & E1000_STATUS_LU) {
@@ -392,6 +386,10 @@ static int igb_set_pauseparam(struct net_device *netdev,
        struct e1000_hw *hw = &adapter->hw;
        int retval = 0;
 
+       /* 100basefx does not support setting link flow control */
+       if (hw->dev_spec._82575.eth_flags.e100_base_fx)
+               return -EINVAL;
+
        adapter->fc_autoneg = pause->autoneg;
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
@@ -813,10 +811,8 @@ static int igb_set_eeprom(struct net_device *netdev,
        ret_val = hw->nvm.ops.write(hw, first_word,
                                    last_word - first_word + 1, eeprom_buff);
 
-       /* Update the checksum over the first part of the EEPROM if needed
-        * and flush shadow RAM for 82573 controllers
-        */
-       if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
+       /* Update the checksum if nvm write succeeded */
+       if (ret_val == 0)
                hw->nvm.ops.update(hw);
 
        igb_set_fw_version(adapter);
index 64cbe0dfe04347aff1af3fd15f5eb130caefaff6..6a0c1b66ce54116b88a8aaf33bcf0518a7ef7ca3 100644 (file)
@@ -1667,10 +1667,13 @@ void igb_down(struct igb_adapter *adapter)
        wrfl();
        msleep(10);
 
-       for (i = 0; i < adapter->num_q_vectors; i++)
+       igb_irq_disable(adapter);
+
+       for (i = 0; i < adapter->num_q_vectors; i++) {
+               napi_synchronize(&(adapter->q_vector[i]->napi));
                napi_disable(&(adapter->q_vector[i]->napi));
+       }
 
-       igb_irq_disable(adapter);
 
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
@@ -6622,7 +6625,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 
        igb_rx_checksum(rx_ring, rx_desc, skb);
 
-       igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+       igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
 
        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
index ca932387a80faaad7775ae782b2aa82c1926c25e..fb098b46c6a634e9a1a401d055b0af9ba154bb61 100644 (file)
 #include <linux/dca.h>
 #endif
 
+#include <net/ll_poll.h>
+
+#ifdef CONFIG_NET_LL_RX_POLL
+#define LL_EXTENDED_STATS
+#endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -182,6 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
+#ifdef LL_EXTENDED_STATS
+       u64 yields;
+       u64 misses;
+       u64 cleaned;
+#endif  /* LL_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -356,9 +366,133 @@ struct ixgbe_q_vector {
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[IFNAMSIZ + 9];
 
+#ifdef CONFIG_NET_LL_RX_POLL
+       unsigned int state;
+#define IXGBE_QV_STATE_IDLE        0
+#define IXGBE_QV_STATE_NAPI       1    /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL       2    /* poll owns this QV */
+#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */
+#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
+#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
+       spinlock_t lock;
+#endif  /* CONFIG_NET_LL_RX_POLL */
+
        /* for dynamic allocation of rings associated with this q_vector */
        struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
+#ifdef CONFIG_NET_LL_RX_POLL
+static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
+{
+
+       spin_lock_init(&q_vector->lock);
+       q_vector->state = IXGBE_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+{
+       int rc = true;
+       spin_lock(&q_vector->lock);
+       if (q_vector->state & IXGBE_QV_LOCKED) {
+               WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
+               q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
+               rc = false;
+#ifdef LL_EXTENDED_STATS
+               q_vector->tx.ring->stats.yields++;
+#endif
+       } else
+               /* we don't care if someone yielded */
+               q_vector->state = IXGBE_QV_STATE_NAPI;
+       spin_unlock(&q_vector->lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+{
+       int rc = false;
+       spin_lock(&q_vector->lock);
+       WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
+                              IXGBE_QV_STATE_NAPI_YIELD));
+
+       if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
+               rc = true;
+       q_vector->state = IXGBE_QV_STATE_IDLE;
+       spin_unlock(&q_vector->lock);
+       return rc;
+}
+
+/* called from ixgbe_low_latency_poll() */
+static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
+{
+       int rc = true;
+       spin_lock_bh(&q_vector->lock);
+       if ((q_vector->state & IXGBE_QV_LOCKED)) {
+               q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
+               rc = false;
+#ifdef LL_EXTENDED_STATS
+               q_vector->rx.ring->stats.yields++;
+#endif
+       } else
+               /* preserve yield marks */
+               q_vector->state |= IXGBE_QV_STATE_POLL;
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+{
+       int rc = false;
+       spin_lock_bh(&q_vector->lock);
+       WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
+
+       if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
+               rc = true;
+       q_vector->state = IXGBE_QV_STATE_IDLE;
+       spin_unlock_bh(&q_vector->lock);
+       return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+{
+       WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+       return q_vector->state & IXGBE_QV_USER_PEND;
+}
+#else /* CONFIG_NET_LL_RX_POLL */
+static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
+{
+}
+
+static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+{
+       return true;
+}
+
+static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+{
+       return false;
+}
+#endif /* CONFIG_NET_LL_RX_POLL */
+
 #ifdef CONFIG_IXGBE_HWMON
 
 #define IXGBE_HWMON_TYPE_LOC           0
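
The state word and helpers added above arbitrate a queue vector between NAPI and low-latency socket polling. Below is a single-threaded sketch of that ownership/yield bookkeeping, with the spinlock omitted and all names illustrative rather than the driver's.

#include <stdbool.h>
#include <stdio.h>

#define QV_IDLE        0
#define QV_NAPI        1   /* NAPI owns the vector */
#define QV_POLL        2   /* a busy-polling socket owns the vector */
#define QV_LOCKED      (QV_NAPI | QV_POLL)
#define QV_NAPI_YIELD  4   /* NAPI found it busy and backed off */
#define QV_POLL_YIELD  8   /* a poller found it busy and backed off */

static bool lock_napi(unsigned *state)
{
	if (*state & QV_LOCKED) {       /* busy: record that NAPI yielded */
		*state |= QV_NAPI_YIELD;
		return false;
	}
	*state = QV_NAPI;               /* we don't care if someone yielded */
	return true;
}

static bool lock_poll(unsigned *state)
{
	if (*state & QV_LOCKED) {       /* busy: record that a poller yielded */
		*state |= QV_POLL_YIELD;
		return false;
	}
	*state |= QV_POLL;              /* preserve existing yield marks */
	return true;
}

static bool unlock(unsigned *state)
{
	/* report whether a poller wanted the vector while it was held */
	bool contended = *state & QV_POLL_YIELD;

	*state = QV_IDLE;
	return contended;
}

int main(void)
{
	unsigned state = QV_IDLE;

	printf("poll gets it: %d\n", lock_poll(&state));   /* 1 */
	printf("napi yields:  %d\n", lock_napi(&state));   /* 0 */
	printf("contended:    %d\n", unlock(&state));      /* 0: NAPI, not a poller, yielded */
	return 0;
}
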
index 1f2c805684dd3ee46027d49ca8289b8d4d63b4e9..e055e000131bdcf0c32280270361c957d0a4d8a0 100644 (file)
@@ -380,3 +380,26 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
        }
        return 0;
 }
+
+static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
+{
+       u32 reg, i;
+
+       reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+       for (i = 0; i < MAX_USER_PRIORITY; i++)
+               map[i] = IXGBE_RTRUP2TC_UP_MASK &
+                       (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
+       return;
+}
+
+void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+{
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_dcb_read_rtrup2tc_82599(hw, map);
+               break;
+       default:
+               break;
+       }
+}
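
ixgbe_dcb_read_rtrup2tc_82599() above unpacks one 3-bit traffic-class field per user priority from the RTRUP2TC register (shift 3, mask 7). A small standalone sketch of that decode, assuming the usual eight 802.1p user priorities; the names below are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define UP_SHIFT 3
#define UP_MASK  7
#define NUM_UP   8

static void unpack_up2tc(uint32_t reg, uint8_t map[NUM_UP])
{
	/* each user priority's traffic class is a 3-bit field */
	for (int i = 0; i < NUM_UP; i++)
		map[i] = (reg >> (i * UP_SHIFT)) & UP_MASK;
}

int main(void)
{
	/* pack a mapping of priorities 0..7 to TCs 0,0,1,1,2,2,3,3 */
	uint32_t reg = 0;
	for (int i = 0; i < NUM_UP; i++)
		reg |= (uint32_t)(i / 2) << (i * UP_SHIFT);

	uint8_t map[NUM_UP];
	unpack_up2tc(reg, map);
	for (int i = 0; i < NUM_UP; i++)
		printf("UP%d -> TC%d\n", i, map[i]);
	return 0;
}
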
index 1634de8b627f179a99b3836b37bd69af54fbbfaf..fc0a2dd5249956bf19ba836bad5c2d92d61b1b60 100644 (file)
@@ -159,6 +159,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
 s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
+void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
+
 /* DCB definitions for credit calculation */
 #define DCB_CREDIT_QUANTUM     64   /* DCB Quantum */
 #define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */
index a4ef07631d1e349aafd5d8f0a110acdafdccd279..d71d9ce3e394b4199f297ebbcb493fa268092dd2 100644 (file)
@@ -45,6 +45,7 @@
 
 /* Receive UP2TC mapping */
 #define IXGBE_RTRUP2TC_UP_SHIFT 3
+#define IXGBE_RTRUP2TC_UP_MASK 7
 /* Transmit UP2TC mapping */
 #define IXGBE_RTTUP2TC_UP_SHIFT 3
 
index f3d68f9696ba0b9b6e430ec38574bd3472b75bce..edd89a1ef27f67f0f40d0cefba9ebed3e39f3185 100644 (file)
@@ -554,6 +554,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                        adapter->ixgbe_ieee_ets->prio_tc[i] =
                                IEEE_8021QAZ_MAX_TCS;
+               /* if possible update UP2TC mappings from HW */
+               ixgbe_dcb_read_rtrup2tc(&adapter->hw,
+                                       adapter->ixgbe_ieee_ets->prio_tc);
        }
 
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
index d3754722adb4097ffdf168df442fe867859171bf..24e2e7aafda2d42201541ef9ee855ee26e1661cb 100644 (file)
@@ -1054,6 +1054,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
+#ifdef LL_EXTENDED_STATS
+                       data[i] = 0;
+                       data[i+1] = 0;
+                       data[i+2] = 0;
+                       i += 3;
+#endif
                        continue;
                }
 
@@ -1063,6 +1069,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
+#ifdef LL_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i+1] = ring->stats.misses;
+               data[i+2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
        for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
                ring = adapter->rx_ring[j];
@@ -1070,6 +1082,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
+#ifdef LL_EXTENDED_STATS
+                       data[i] = 0;
+                       data[i+1] = 0;
+                       data[i+2] = 0;
+                       i += 3;
+#endif
                        continue;
                }
 
@@ -1079,6 +1097,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
+#ifdef LL_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i+1] = ring->stats.misses;
+               data[i+2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
 
        for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
@@ -1115,12 +1139,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+#ifdef LL_EXTENDED_STATS
+                       sprintf(p, "tx_q_%u_napi_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_q_%u_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_q_%u_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* LL_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+#ifdef LL_EXTENDED_STATS
+                       sprintf(p, "rx_q_%u_ll_poll_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_q_%u_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_q_%u_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* LL_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);
index ef5f7a678ce14214eff82689efd4d999fc7d926b..90b4e1089eccae8c794dc10185a550662ae77e57 100644 (file)
@@ -811,6 +811,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
        /* initialize NAPI */
        netif_napi_add(adapter->netdev, &q_vector->napi,
                       ixgbe_poll, 64);
+       napi_hash_add(&q_vector->napi);
 
        /* tie q_vector and adapter together */
        adapter->q_vector[v_idx] = q_vector;
@@ -931,6 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
                adapter->rx_ring[ring->queue_index] = NULL;
 
        adapter->q_vector[v_idx] = NULL;
+       napi_hash_del(&q_vector->napi);
        netif_napi_del(&q_vector->napi);
 
        /*
index d30fbdd81fca61173c6e1d9a10baab3fbb5edc44..047ebaaf01413596bfc0c23363a43fea877ea955 100644 (file)
@@ -1504,7 +1504,9 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 
-       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+       if (ixgbe_qv_ll_polling(q_vector))
+               netif_receive_skb(skb);
+       else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
        else
                netif_rx(skb);
@@ -1892,9 +1894,9 @@ dma_sync:
  * expensive overhead for IOMMU access; this provides a means of avoiding
  * it by maintaining the mapping of the page to the system.
  *
- * Returns true if all work is completed without reaching budget
+ * Returns amount of work completed
  **/
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               const int budget)
 {
@@ -1976,6 +1978,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
 #endif /* IXGBE_FCOE */
+               skb_mark_ll(skb, &q_vector->napi);
                ixgbe_rx_skb(q_vector, skb);
 
                /* update budget accounting */
@@ -1992,9 +1995,43 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        if (cleaned_count)
                ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
-       return (total_rx_packets < budget);
+       return total_rx_packets;
 }
 
+#ifdef CONFIG_NET_LL_RX_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbe_low_latency_recv(struct napi_struct *napi)
+{
+       struct ixgbe_q_vector *q_vector =
+                       container_of(napi, struct ixgbe_q_vector, napi);
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_ring  *ring;
+       int found = 0;
+
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               return LL_FLUSH_FAILED;
+
+       if (!ixgbe_qv_lock_poll(q_vector))
+               return LL_FLUSH_BUSY;
+
+       ixgbe_for_each_ring(ring, q_vector->rx) {
+               found = ixgbe_clean_rx_irq(q_vector, ring, 4);
+#ifdef LL_EXTENDED_STATS
+               if (found)
+                       ring->stats.cleaned += found;
+               else
+                       ring->stats.misses++;
+#endif
+               if (found)
+                       break;
+       }
+
+       ixgbe_qv_unlock_poll(q_vector);
+
+       return found;
+}
+#endif /* CONFIG_NET_LL_RX_POLL */
+
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -2550,6 +2587,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        ixgbe_for_each_ring(ring, q_vector->tx)
                clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 
+       if (!ixgbe_qv_lock_napi(q_vector))
+               return budget;
+
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        if (q_vector->rx.count > 1)
@@ -2558,9 +2598,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                per_ring_budget = budget;
 
        ixgbe_for_each_ring(ring, q_vector->rx)
-               clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
-                                                    per_ring_budget);
+               clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
+                                  per_ring_budget) < per_ring_budget);
 
+       ixgbe_qv_unlock_napi(q_vector);
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
@@ -3747,16 +3788,25 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
 
-       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+               ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
                napi_enable(&adapter->q_vector[q_idx]->napi);
+       }
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
 
-       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+       local_bh_disable(); /* for ixgbe_qv_lock_napi() */
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
                napi_disable(&adapter->q_vector[q_idx]->napi);
+               while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+                       pr_info("QV %d locked\n", q_idx);
+                       mdelay(1);
+               }
+       }
+       local_bh_enable();
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -7177,6 +7227,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
+#ifdef CONFIG_NET_LL_RX_POLL
+       .ndo_ll_poll            = ixgbe_low_latency_recv,
+#endif
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
        .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
index 070a6f1a057745a803d3a6ccd6d8201644e95703..7fbe6abf60542fe483170eb807f6f16f2e563cad 100644 (file)
@@ -3148,7 +3148,6 @@ jme_init_one(struct pci_dev *pdev,
        jme->mii_if.mdio_write = jme_mdio_write;
 
        jme_clear_pm(jme);
-       pci_set_power_state(jme->pdev, PCI_D0);
        device_set_wakeup_enable(&pdev->dev, true);
 
        jme_set_phyfifo_5level(jme);
index 5409fe876a441607488d43ac39829f7e5fc9e891..270e65f21102415d554d115956644c862e02725e 100644 (file)
@@ -483,7 +483,6 @@ static void korina_multicast_list(struct net_device *dev)
        unsigned long flags;
        struct netdev_hw_addr *ha;
        u32 recognise = ETH_ARC_AB;     /* always accept broadcasts */
-       int i;
 
        /* Set promiscuous mode */
        if (dev->flags & IFF_PROMISC)
@@ -495,12 +494,9 @@ static void korina_multicast_list(struct net_device *dev)
 
        /* Build the hash table */
        if (netdev_mc_count(dev) > 4) {
-               u16 hash_table[4];
+               u16 hash_table[4] = { 0 };
                u32 crc;
 
-               for (i = 0; i < 4; i++)
-                       hash_table[i] = 0;
-
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
                        crc >>= 26;
@@ -1214,7 +1210,6 @@ static int korina_remove(struct platform_device *pdev)
        iounmap(lp->rx_dma_regs);
        iounmap(lp->tx_dma_regs);
 
-       platform_set_drvdata(pdev, NULL);
        unregister_netdev(bif->dev);
        free_netdev(bif->dev);
 
index d0afeea181fb0fd35918941c6e19a0008ded1141..510d50603a02a2fb92a1967a0ef769df157fb2ed 100644 (file)
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.4";
@@ -115,6 +119,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define  LINK_UP                       0x00000002
 #define TXQ_COMMAND                    0x0048
 #define TXQ_FIX_PRIO_CONF              0x004c
+#define PORT_SERIAL_CONTROL1           0x004c
+#define  CLK125_BYPASS_EN              0x00000010
 #define TX_BW_RATE                     0x0050
 #define TX_BW_MTU                      0x0058
 #define TX_BW_BURST                    0x005c
@@ -615,7 +621,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 
                rx_desc = rxq->rx_desc_area + rx;
 
-               size = skb->end - skb->data;
+               size = skb_end_pointer(skb) - skb->data;
                rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
                                                  skb->data, size,
                                                  DMA_FROM_DEVICE);
@@ -867,7 +873,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
        int reclaimed;
 
-       __netif_tx_lock(nq, smp_processor_id());
+       __netif_tx_lock_bh(nq);
 
        reclaimed = 0;
        while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -913,7 +919,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                dev_kfree_skb(skb);
        }
 
-       __netif_tx_unlock(nq);
+       __netif_tx_unlock_bh(nq);
 
        if (reclaimed < budget)
                mp->work_tx &= ~(1 << txq->index);
@@ -2450,13 +2456,150 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
        }
 }
 
+#if defined(CONFIG_OF)
+static const struct of_device_id mv643xx_eth_shared_ids[] = {
+       { .compatible = "marvell,orion-eth", },
+       { .compatible = "marvell,kirkwood-eth", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
+#endif
+
+#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
+#define mv643xx_eth_property(_np, _name, _v)                           \
+       do {                                                            \
+               u32 tmp;                                                \
+               if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
+                       _v = tmp;                                       \
+       } while (0)
+
+static struct platform_device *port_platdev[3];
+
+static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+                                         struct device_node *pnp)
+{
+       struct platform_device *ppdev;
+       struct mv643xx_eth_platform_data ppd;
+       struct resource res;
+       const char *mac_addr;
+       int ret;
+
+       memset(&ppd, 0, sizeof(ppd));
+       ppd.shared = pdev;
+
+       memset(&res, 0, sizeof(res));
+       if (!of_irq_to_resource(pnp, 0, &res)) {
+               dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
+               dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
+               return -EINVAL;
+       }
+
+       if (ppd.port_number >= 3) {
+               dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
+               return -EINVAL;
+       }
+
+       mac_addr = of_get_mac_address(pnp);
+       if (mac_addr)
+               memcpy(ppd.mac_addr, mac_addr, 6);
+
+       mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
+       mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
+       mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
+       mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
+       mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
+       mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
+
+       ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
+       if (!ppd.phy_node) {
+               ppd.phy_addr = MV643XX_ETH_PHY_NONE;
+               of_property_read_u32(pnp, "speed", &ppd.speed);
+               of_property_read_u32(pnp, "duplex", &ppd.duplex);
+       }
+
+       ppdev = platform_device_alloc(MV643XX_ETH_NAME, ppd.port_number);
+       if (!ppdev)
+               return -ENOMEM;
+       ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+       ret = platform_device_add_resources(ppdev, &res, 1);
+       if (ret)
+               goto port_err;
+
+       ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
+       if (ret)
+               goto port_err;
+
+       ret = platform_device_add(ppdev);
+       if (ret)
+               goto port_err;
+
+       port_platdev[ppd.port_number] = ppdev;
+
+       return 0;
+
+port_err:
+       platform_device_put(ppdev);
+       return ret;
+}
+
+static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+       struct mv643xx_eth_shared_platform_data *pd;
+       struct device_node *pnp, *np = pdev->dev.of_node;
+       int ret;
+
+       /* bail out if not registered from DT */
+       if (!np)
+               return 0;
+
+       pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return -ENOMEM;
+       pdev->dev.platform_data = pd;
+
+       mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
+
+       for_each_available_child_of_node(np, pnp) {
+               ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+static void mv643xx_eth_shared_of_remove(void)
+{
+       int n;
+
+       for (n = 0; n < 3; n++) {
+               platform_device_del(port_platdev[n]);
+               port_platdev[n] = NULL;
+       }
+}
+#else
+static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static inline void mv643xx_eth_shared_of_remove(void)
+{
+}
+#endif
+
 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 {
        static int mv643xx_eth_version_printed;
-       struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+       struct mv643xx_eth_shared_platform_data *pd;
        struct mv643xx_eth_shared_private *msp;
        const struct mbus_dram_target_info *dram;
        struct resource *res;
+       int ret;
 
        if (!mv643xx_eth_version_printed++)
                pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
@@ -2469,8 +2612,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
        if (msp == NULL)
                return -ENOMEM;
+       platform_set_drvdata(pdev, msp);
 
-       msp->base = ioremap(res->start, resource_size(res));
+       msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (msp->base == NULL)
                return -ENOMEM;
 
@@ -2485,12 +2629,15 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        if (dram)
                mv643xx_eth_conf_mbus_windows(msp, dram);
 
+       ret = mv643xx_eth_shared_of_probe(pdev);
+       if (ret)
+               return ret;
+       pd = pdev->dev.platform_data;
+
        msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
                                        pd->tx_csum_limit : 9 * 1024;
        infer_hw_params(msp);
 
-       platform_set_drvdata(pdev, msp);
-
        return 0;
 }
 
@@ -2498,10 +2645,9 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
 {
        struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
 
-       iounmap(msp->base);
+       mv643xx_eth_shared_of_remove();
        if (!IS_ERR(msp->clk))
                clk_disable_unprepare(msp->clk);
-
        return 0;
 }
 
@@ -2511,6 +2657,7 @@ static struct platform_driver mv643xx_eth_shared_driver = {
        .driver = {
                .name   = MV643XX_ETH_SHARED_NAME,
                .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
        },
 };
 
@@ -2701,6 +2848,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        mp->dev = dev;
 
+       /* Kirkwood resets some registers on gated clocks. Especially
+        * CLK125_BYPASS_EN must be cleared but is not available on
+        * all other SoCs/System Controllers using this driver.
+        */
+       if (of_device_is_compatible(pdev->dev.of_node,
+                                   "marvell,kirkwood-eth-port"))
+               wrlp(mp, PORT_SERIAL_CONTROL1,
+                    rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
+
        /*
         * Start with a default rate, and if there is a clock, allow
         * it to override the default.
@@ -2710,23 +2866,35 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        if (!IS_ERR(mp->clk)) {
                clk_prepare_enable(mp->clk);
                mp->t_clk = clk_get_rate(mp->clk);
+       } else if (!IS_ERR(mp->shared->clk)) {
+               mp->t_clk = clk_get_rate(mp->shared->clk);
        }
 
        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);
 
-       if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
+       err = 0;
+       if (pd->phy_node) {
+               mp->phy = of_phy_connect(mp->dev, pd->phy_node,
+                                        mv643xx_eth_adjust_link, 0,
+                                        PHY_INTERFACE_MODE_GMII);
+               if (!mp->phy)
+                       err = -ENODEV;
+       } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
                mp->phy = phy_scan(mp, pd->phy_addr);
 
-               if (IS_ERR(mp->phy)) {
+               if (IS_ERR(mp->phy))
                        err = PTR_ERR(mp->phy);
-                       if (err == -ENODEV)
-                               err = -EPROBE_DEFER;
-                       goto out;
-               }
-               phy_init(mp, pd->speed, pd->duplex);
+               else
+                       phy_init(mp, pd->speed, pd->duplex);
        }
+       if (err == -ENODEV) {
+               err = -EPROBE_DEFER;
+               goto out;
+       }
+       if (err)
+               goto out;
 
        SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
 
@@ -2745,7 +2913,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
 
-       netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+       netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
 
        init_timer(&mp->rx_oom);
        mp->rx_oom.data = (unsigned long)mp;
@@ -2805,7 +2973,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 
        unregister_netdev(mp->dev);
        if (mp->phy != NULL)
-               phy_detach(mp->phy);
+               phy_disconnect(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);
 
        if (!IS_ERR(mp->clk))
@@ -2813,8 +2981,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 
        free_netdev(mp->dev);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
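
For reference, a minimal sketch of what the mv643xx_eth_property() macro added above expands to for one property ("marvell,tx-queue-size", as used in mv643xx_eth_shared_of_add_port()); the platform-data field is only written when the optional device-tree property exists. The names "example_np" and "ppd" are illustrative only:

	#include <linux/of.h>
	#include <linux/mv643xx_eth.h>

	static void example_read_tx_queue_size(struct device_node *example_np,
					       struct mv643xx_eth_platform_data *ppd)
	{
		u32 tmp;

		/* leaves ppd->tx_queue_size untouched if the property is absent */
		if (!of_property_read_u32(example_np, "marvell,tx-queue-size", &tmp))
			ppd->tx_queue_size = tmp;
	}
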
 
index c96678555233c4afc1336c012c17c3b0da943060..712779fb12b7d80416db0a4349257f046ae0d11d 100644 (file)
@@ -2251,6 +2251,21 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
        return 0;
 }
 
+/* Get mac address */
+static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+{
+       u32 mac_addr_l, mac_addr_h;
+
+       mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
+       mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
+       addr[0] = (mac_addr_h >> 24) & 0xFF;
+       addr[1] = (mac_addr_h >> 16) & 0xFF;
+       addr[2] = (mac_addr_h >> 8) & 0xFF;
+       addr[3] = mac_addr_h & 0xFF;
+       addr[4] = (mac_addr_l >> 8) & 0xFF;
+       addr[5] = mac_addr_l & 0xFF;
+}
+
 /* Handle setting mac address */
 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 {
@@ -2667,7 +2682,9 @@ static int mvneta_probe(struct platform_device *pdev)
        u32 phy_addr;
        struct mvneta_port *pp;
        struct net_device *dev;
-       const char *mac_addr;
+       const char *dt_mac_addr;
+       char hw_mac_addr[ETH_ALEN];
+       const char *mac_from;
        int phy_mode;
        int err;
 
@@ -2703,13 +2720,6 @@ static int mvneta_probe(struct platform_device *pdev)
                goto err_free_irq;
        }
 
-       mac_addr = of_get_mac_address(dn);
-
-       if (!mac_addr || !is_valid_ether_addr(mac_addr))
-               eth_hw_addr_random(dev);
-       else
-               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
-
        dev->tx_queue_len = MVNETA_MAX_TXD;
        dev->watchdog_timeo = 5 * HZ;
        dev->netdev_ops = &mvneta_netdev_ops;
@@ -2740,6 +2750,21 @@ static int mvneta_probe(struct platform_device *pdev)
 
        clk_prepare_enable(pp->clk);
 
+       dt_mac_addr = of_get_mac_address(dn);
+       if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+               mac_from = "device tree";
+               memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+       } else {
+               mvneta_get_mac_addr(pp, hw_mac_addr);
+               if (is_valid_ether_addr(hw_mac_addr)) {
+                       mac_from = "hardware";
+                       memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+               } else {
+                       mac_from = "random";
+                       eth_hw_addr_random(dev);
+               }
+       }
+
        pp->tx_done_timer.data = (unsigned long)dev;
 
        pp->tx_ring_size = MVNETA_MAX_TXD;
@@ -2772,7 +2797,8 @@ static int mvneta_probe(struct platform_device *pdev)
                goto err_deinit;
        }
 
-       netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+       netdev_info(dev, "Using %s mac address %pM\n", mac_from,
+                   dev->dev_addr);
 
        platform_set_drvdata(pdev, pp->dev);
 
@@ -2804,8 +2830,6 @@ static int mvneta_remove(struct platform_device *pdev)
        irq_dispose_mapping(dev->irq);
        free_netdev(dev);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
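
The register layout implied by mvneta_get_mac_addr() above is: MVNETA_MAC_ADDR_HIGH holds MAC bytes 0-3 (byte 0 in bits 31:24) and MVNETA_MAC_ADDR_LOW holds bytes 4-5 (byte 4 in bits 15:8). A sketch of the inverse packing, for illustration only (not the driver's actual address-programming path):

	#include <linux/types.h>

	static void example_mac_to_regs(const unsigned char *addr,
					u32 *mac_addr_h, u32 *mac_addr_l)
	{
		*mac_addr_h = (addr[0] << 24) | (addr[1] << 16) |
			      (addr[2] << 8) | addr[3];
		*mac_addr_l = (addr[4] << 8) | addr[5];
	}
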
 
index 339bb323cb0c99c91bf2741cbfea918e7ae4499d..ec20508f0d6b093197ec62707afcadb27347f590 100644 (file)
@@ -357,7 +357,7 @@ static void rxq_refill(struct net_device *dev)
                /* Get 'used' Rx descriptor */
                used_rx_desc = pep->rx_used_desc_q;
                p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
-               size = skb->end - skb->data;
+               size = skb_end_pointer(skb) - skb->data;
                p_used_rx_desc->buf_ptr = dma_map_single(NULL,
                                                         skb->data,
                                                         size,
@@ -1602,7 +1602,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
        unregister_netdev(dev);
        cancel_work_sync(&pep->tx_timeout_task);
        free_netdev(dev);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 171f4b3dda07ae5fc4f9986c36102beeab8de49e..c896079728e1312893fdf03f0b0e111f2a6676cc 100644 (file)
@@ -3706,7 +3706,7 @@ static const struct file_operations skge_debug_fops = {
 static int skge_device_event(struct notifier_block *unused,
                             unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct skge_port *skge;
        struct dentry *d;
 
index d175bbd3ffd37952f5dc2e66348049693ac7d2d7..e09a8c6f85366c4c1251f5365549e154f743f08c 100644 (file)
@@ -4642,7 +4642,7 @@ static const struct file_operations sky2_debug_fops = {
 static int sky2_device_event(struct notifier_block *unused,
                             unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct sky2_port *sky2 = netdev_priv(dev);
 
        if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
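
Both the skge and sky2 hunks follow the same netdevice-notifier API change: the void *ptr argument now carries a struct netdev_notifier_info, so the net_device is recovered with netdev_notifier_info_to_dev() instead of a direct cast. A minimal sketch of a callback using the new helper ("example_notifier" is a hypothetical name):

	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int example_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_UNREGISTER)
			pr_debug("%s is going away\n", dev->name);

		return NOTIFY_DONE;
	}
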
index 1df56cc50ee93702128859e7a204a84a78f5fcd6..df04c8206ebe2e52bd0a3127e764587afa81b951 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/device.h>
 #include <linux/semaphore.h>
 #include <rdma/ib_smi.h>
 
@@ -222,8 +223,6 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
                 * FLR process. The only non-zero result in the RESET command
                 * is MLX4_DELAY_RESET_SLAVE*/
                if ((MLX4_COMM_CMD_RESET == cmd)) {
-                       mlx4_warn(dev, "Got slave FLRed from Communication"
-                                 " channel (ret:0x%x)\n", ret_from_pending);
                        err = MLX4_DELAY_RESET_SLAVE;
                } else {
                        mlx4_warn(dev, "Communication channel timed out\n");
@@ -258,6 +257,8 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
 
        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
+               mlx4_warn(dev, "communication channel command 0x%x timed out\n",
+                         op);
                err = -EBUSY;
                goto out;
        }
@@ -487,6 +488,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        }
 
        if (cmd_pending(dev)) {
+               mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
+                         op);
                err = -ETIMEDOUT;
                goto out;
        }
@@ -550,6 +553,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 
        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
+               mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
+                         op);
                err = -EBUSY;
                goto out;
        }
@@ -2180,7 +2185,54 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
        ivf->qos        = s_info->default_qos;
        ivf->tx_rate    = s_info->tx_rate;
        ivf->spoofchk   = s_info->spoofchk;
+       ivf->linkstate  = s_info->link_state;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
+
+int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_vport_state *s_info;
+       struct mlx4_vport_oper_state *vp_oper;
+       int slave;
+       u8 link_stat_event;
+
+       slave = mlx4_get_slave_indx(dev, vf);
+       if (slave < 0)
+               return -EINVAL;
+
+       switch (link_state) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               /* get current link state */
+               if (!priv->sense.do_sense_port[port])
+                       link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
+               else
+                       link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
+           break;
+
+       case IFLA_VF_LINK_STATE_ENABLE:
+               link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
+           break;
+
+       case IFLA_VF_LINK_STATE_DISABLE:
+               link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
+           break;
+
+       default:
+               mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
+                         link_state, slave, port);
+               return -EINVAL;
+       };
+       /* update the admin & oper state on the link state */
+       s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+       vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+       s_info->link_state = link_state;
+       vp_oper->state.link_state = link_state;
+
+       /* send event */
+       mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
index 1e6c594d6d04d282c0dd453ef6a9351d34e98983..3e2d5047cdb39321a35cd27359dbdeb6f277391b 100644 (file)
@@ -139,6 +139,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 
        if (!cq->is_tx) {
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+               napi_hash_add(&cq->napi);
                napi_enable(&cq->napi);
        }
 
@@ -162,6 +163,8 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
        if (!cq->is_tx) {
                napi_disable(&cq->napi);
+               napi_hash_del(&cq->napi);
+               synchronize_rcu();
                netif_napi_del(&cq->napi);
        }
 
index 0f91222ea3d77dd1a2ff29c90521da77d560c9f3..9d4a1ea030d84a95055afbc07a08dd131d885f8d 100644 (file)
@@ -207,9 +207,6 @@ static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int i;
 
-       if (!priv->maxrate)
-               return -EINVAL;
-
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                maxrate->tc_maxrate[i] =
                        priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
index c9e6b62dd000955565fb0186d334221a834b0a40..727874f575cedc80e65d52d4918007151feb3c09 100644 (file)
@@ -222,7 +222,12 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
        switch (sset) {
        case ETH_SS_STATS:
                return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
-                       (priv->tx_ring_num + priv->rx_ring_num) * 2;
+                       (priv->tx_ring_num * 2) +
+#ifdef CONFIG_NET_LL_RX_POLL
+                       (priv->rx_ring_num * 5);
+#else
+                       (priv->rx_ring_num * 2);
+#endif
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -271,6 +276,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
        for (i = 0; i < priv->rx_ring_num; i++) {
                data[index++] = priv->rx_ring[i].packets;
                data[index++] = priv->rx_ring[i].bytes;
+#ifdef CONFIG_NET_LL_RX_POLL
+               data[index++] = priv->rx_ring[i].yields;
+               data[index++] = priv->rx_ring[i].misses;
+               data[index++] = priv->rx_ring[i].cleaned;
+#endif
        }
        spin_unlock_bh(&priv->stats_lock);
 
@@ -334,6 +344,14 @@ static void mlx4_en_get_strings(struct net_device *dev,
                                "rx%d_packets", i);
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "rx%d_bytes", i);
+#ifdef CONFIG_NET_LL_RX_POLL
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_napi_yield", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_misses", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_cleaned", i);
+#endif
                }
                break;
        }
index a5c9df07a7d00ee6da76a091a2cca99c779767ba..a071cda2dd04ad880ce3862d7c4011839524dd13 100644 (file)
@@ -310,7 +310,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 err_mr:
        (void) mlx4_mr_free(dev, &mdev->mr);
 err_map:
-       if (!mdev->uar_map)
+       if (mdev->uar_map)
                iounmap(mdev->uar_map);
 err_uar:
        mlx4_uar_free(dev, &mdev->priv_uar);
index b35f9470009363bad560f7142322436baa76d8a3..caf2047705697f5e370407f46e5b521304ba83f4 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/hash.h>
 #include <net/ip.h>
+#include <net/ll_poll.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/device.h>
@@ -67,6 +68,34 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
        return 0;
 }
 
+#ifdef CONFIG_NET_LL_RX_POLL
+/* must be called with local_bh_disable()d */
+static int mlx4_en_low_latency_recv(struct napi_struct *napi)
+{
+       struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+       struct net_device *dev = cq->dev;
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+       int done;
+
+       if (!priv->port_up)
+               return LL_FLUSH_FAILED;
+
+       if (!mlx4_en_cq_lock_poll(cq))
+               return LL_FLUSH_BUSY;
+
+       done = mlx4_en_process_rx_cq(dev, cq, 4);
+       if (likely(done))
+               rx_ring->cleaned += done;
+       else
+               rx_ring->misses++;
+
+       mlx4_en_cq_unlock_poll(cq);
+
+       return done;
+}
+#endif /* CONFIG_NET_LL_RX_POLL */
+
 #ifdef CONFIG_RFS_ACCEL
 
 struct mlx4_en_filter {
@@ -376,7 +405,7 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
-               en_err(priv, "failed adding vlan %d\n", vid);
+               en_dbg(HW, priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);
 
        return 0;
@@ -399,7 +428,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
        if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
                mlx4_unregister_vlan(mdev->dev, priv->port, idx);
        else
-               en_err(priv, "could not find vid %d in cache\n", vid);
+               en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
 
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1207,10 +1236,19 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       int i;
 
        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
+                       continue;
+               en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
+                       i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
+                       priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+       }
+
        priv->port_stats.tx_timeout++;
        en_dbg(DRV, priv, "Scheduling watchdog\n");
        queue_work(mdev->workqueue, &priv->watchdog_task);
@@ -1323,6 +1361,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
                        priv->last_moder_time[ring] = moder_time;
                        cq = &priv->rx_cq[ring];
                        cq->moder_time = moder_time;
+                       cq->moder_cnt = priv->rx_frames;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err)
                                en_err(priv, "Failed modifying moderation for cq:%d\n",
@@ -1345,12 +1384,13 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
-               err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
-               if (err)
-                       en_dbg(HW, priv, "Could not update stats\n");
+               if (priv->port_up) {
+                       err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+                       if (err)
+                               en_dbg(HW, priv, "Could not update stats\n");
 
-               if (priv->port_up)
                        mlx4_en_auto_moderation(priv);
+               }
 
                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
@@ -1444,6 +1484,8 @@ int mlx4_en_start_port(struct net_device *dev)
        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = &priv->rx_cq[i];
 
+               mlx4_en_cq_init_lock(cq);
+
                err = mlx4_en_activate_cq(priv, cq, i);
                if (err) {
                        en_err(priv, "Failed activating Rx CQ\n");
@@ -1602,6 +1644,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
                return;
        }
 
+       /* close port*/
+       mlx4_CLOSE_PORT(mdev->dev, priv->port);
+
        /* Synchronize with tx routine */
        netif_tx_lock_bh(dev);
        if (detach)
@@ -1693,14 +1738,20 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
-               mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
-               while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+               struct mlx4_en_cq *cq = &priv->rx_cq[i];
+
+               local_bh_disable();
+               while (!mlx4_en_cq_lock_napi(cq)) {
+                       pr_info("CQ %d locked\n", i);
+                       mdelay(1);
+               }
+               local_bh_enable();
+
+               while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
                        msleep(1);
-               mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+               mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+               mlx4_en_deactivate_cq(priv, cq);
        }
-
-       /* close port*/
-       mlx4_CLOSE_PORT(mdev->dev, priv->port);
 }
 
 static void mlx4_en_restart(struct work_struct *work)
@@ -2060,6 +2111,13 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
        return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
 }
 
+static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = en_priv->mdev;
+
+       return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
+}
 static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
@@ -2082,6 +2140,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
+#ifdef CONFIG_NET_LL_RX_POLL
+       .ndo_ll_poll            = mlx4_en_low_latency_recv,
+#endif
 };
 
 static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2100,6 +2161,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
        .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
        .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
+       .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
        .ndo_get_vf_config      = mlx4_en_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mlx4_en_netpoll,
@@ -2118,6 +2180,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        struct mlx4_en_priv *priv;
        int i;
        int err;
+       u64 mac_u64;
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2191,10 +2254,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->addr_len = ETH_ALEN;
        mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
        if (!is_valid_ether_addr(dev->dev_addr)) {
-               en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
-                      priv->port, dev->dev_addr);
-               err = -EINVAL;
-               goto out;
+               if (mlx4_is_slave(priv->mdev->dev)) {
+                       eth_hw_addr_random(dev);
+                       en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
+                       mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+                       mdev->dev->caps.def_mac[priv->port] = mac_u64;
+               } else {
+                       en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+                              priv->port, dev->dev_addr);
+                       err = -EINVAL;
+                       goto out;
+               }
        }
 
        memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
@@ -2262,6 +2332,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        mdev->pndev[port] = dev;
 
        netif_carrier_off(dev);
+       mlx4_en_set_default_moderation(priv);
+
        err = register_netdev(dev);
        if (err) {
                en_err(priv, "Netdev registration failed for port %d\n", port);
@@ -2293,7 +2365,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                en_err(priv, "Failed Initializing port\n");
                goto out;
        }
-       mlx4_en_set_default_moderation(priv);
        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 
        if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
index 91f2b2c43c12669f796ddf0327baa18c33025891..d3f508697a3dd664f65a687c81964f30fee04a93 100644 (file)
@@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
        if (user_prio >= 0) {
                context->pri_path.sched_queue |= user_prio << 3;
-               context->pri_path.feup = 1 << 6;
+               context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
        }
        context->pri_path.counter_index = 0xff;
        context->cqn_send = cpu_to_be32(cqn);
index 02aee1ebd20336e49a7acd39bfeff3f6035c97e7..76997b93fdfebda261f26621b27b19e1f9e95feb 100644 (file)
@@ -31,6 +31,7 @@
  *
  */
 
+#include <net/ll_poll.h>
 #include <linux/mlx4/cq.h>
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
 
 #include "mlx4_en.h"
 
+static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
+                           struct mlx4_en_rx_alloc *page_alloc,
+                           const struct mlx4_en_frag_info *frag_info,
+                           gfp_t _gfp)
+{
+       int order;
+       struct page *page;
+       dma_addr_t dma;
+
+       for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
+               gfp_t gfp = _gfp;
+
+               if (order)
+                       gfp |= __GFP_COMP | __GFP_NOWARN;
+               page = alloc_pages(gfp, order);
+               if (likely(page))
+                       break;
+               if (--order < 0 ||
+                   ((PAGE_SIZE << order) < frag_info->frag_size))
+                       return -ENOMEM;
+       }
+       dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
+                          PCI_DMA_FROMDEVICE);
+       if (dma_mapping_error(priv->ddev, dma)) {
+               put_page(page);
+               return -ENOMEM;
+       }
+       page_alloc->size = PAGE_SIZE << order;
+       page_alloc->page = page;
+       page_alloc->dma = dma;
+       page_alloc->offset = frag_info->frag_align;
+       /* Not doing get_page() for each frag is a big win
+        * on asymmetric workloads.
+        */
+       atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+       return 0;
+}
+
 static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
                               struct mlx4_en_rx_desc *rx_desc,
                               struct mlx4_en_rx_alloc *frags,
-                              struct mlx4_en_rx_alloc *ring_alloc)
+                              struct mlx4_en_rx_alloc *ring_alloc,
+                              gfp_t gfp)
 {
        struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
-       struct mlx4_en_frag_info *frag_info;
+       const struct mlx4_en_frag_info *frag_info;
        struct page *page;
        dma_addr_t dma;
        int i;
 
        for (i = 0; i < priv->num_frags; i++) {
                frag_info = &priv->frag_info[i];
-               if (ring_alloc[i].offset == frag_info->last_offset) {
-                       page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
-                                       MLX4_EN_ALLOC_ORDER);
-                       if (!page)
-                               goto out;
-                       dma = dma_map_page(priv->ddev, page, 0,
-                               MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
-                       if (dma_mapping_error(priv->ddev, dma)) {
-                               put_page(page);
-                               goto out;
-                       }
-                       page_alloc[i].page = page;
-                       page_alloc[i].dma = dma;
-                       page_alloc[i].offset = frag_info->frag_align;
-               } else {
-                       page_alloc[i].page = ring_alloc[i].page;
-                       get_page(ring_alloc[i].page);
-                       page_alloc[i].dma = ring_alloc[i].dma;
-                       page_alloc[i].offset = ring_alloc[i].offset +
-                                               frag_info->frag_stride;
-               }
+               page_alloc[i] = ring_alloc[i];
+               page_alloc[i].offset += frag_info->frag_stride;
+               if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+                       continue;
+               if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
+                       goto out;
        }
 
        for (i = 0; i < priv->num_frags; i++) {
@@ -87,14 +112,16 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 
        return 0;
 
-
 out:
        while (i--) {
                frag_info = &priv->frag_info[i];
-               if (ring_alloc[i].offset == frag_info->last_offset)
+               if (page_alloc[i].page != ring_alloc[i].page) {
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
-                               MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
-               put_page(page_alloc[i].page);
+                               page_alloc[i].size, PCI_DMA_FROMDEVICE);
+                       page = page_alloc[i].page;
+                       atomic_set(&page->_count, 1);
+                       put_page(page);
+               }
        }
        return -ENOMEM;
 }
@@ -103,12 +130,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
                              struct mlx4_en_rx_alloc *frags,
                              int i)
 {
-       struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+       const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
 
-       if (frags[i].offset == frag_info->last_offset) {
-               dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
+       if (frags[i].offset + frag_info->frag_stride > frags[i].size)
+               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
                                         PCI_DMA_FROMDEVICE);
-       }
+
        if (frags[i].page)
                put_page(frags[i].page);
 }
@@ -116,35 +143,28 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
 static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
                                  struct mlx4_en_rx_ring *ring)
 {
-       struct mlx4_en_rx_alloc *page_alloc;
        int i;
+       struct mlx4_en_rx_alloc *page_alloc;
 
        for (i = 0; i < priv->num_frags; i++) {
-               page_alloc = &ring->page_alloc[i];
-               page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
-                                              MLX4_EN_ALLOC_ORDER);
-               if (!page_alloc->page)
-                       goto out;
+               const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
 
-               page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
-                                       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
-               if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
-                       put_page(page_alloc->page);
-                       page_alloc->page = NULL;
+               if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
+                                    frag_info, GFP_KERNEL))
                        goto out;
-               }
-               page_alloc->offset = priv->frag_info[i].frag_align;
-               en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
-                      i, page_alloc->page);
        }
        return 0;
 
 out:
        while (i--) {
+               struct page *page;
+
                page_alloc = &ring->page_alloc[i];
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                               MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
-               put_page(page_alloc->page);
+                              page_alloc->size, PCI_DMA_FROMDEVICE);
+               page = page_alloc->page;
+               atomic_set(&page->_count, 1);
+               put_page(page);
                page_alloc->page = NULL;
        }
        return -ENOMEM;
@@ -157,13 +177,18 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
        int i;
 
        for (i = 0; i < priv->num_frags; i++) {
+               const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+
                page_alloc = &ring->page_alloc[i];
                en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
                       i, page_count(page_alloc->page));
 
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                               MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
-               put_page(page_alloc->page);
+                               page_alloc->size, PCI_DMA_FROMDEVICE);
+               while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+                       put_page(page_alloc->page);
+                       page_alloc->offset += frag_info->frag_stride;
+               }
                page_alloc->page = NULL;
        }
 }
@@ -194,13 +219,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 }
 
 static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
-                                  struct mlx4_en_rx_ring *ring, int index)
+                                  struct mlx4_en_rx_ring *ring, int index,
+                                  gfp_t gfp)
 {
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
        struct mlx4_en_rx_alloc *frags = ring->rx_info +
                                        (index << priv->log_rx_info);
 
-       return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
+       return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
 }
 
 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -234,7 +260,8 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
                        ring = &priv->rx_ring[ring_ind];
 
                        if (mlx4_en_prepare_rx_desc(priv, ring,
-                                                   ring->actual_size)) {
+                                                   ring->actual_size,
+                                                   GFP_KERNEL)) {
                                if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
                                        en_err(priv, "Failed to allocate "
                                                     "enough rx buffers\n");
@@ -449,11 +476,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                                        DMA_FROM_DEVICE);
 
                /* Save page reference in skb */
-               get_page(frags[nr].page);
                __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
                skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
                skb_frags_rx[nr].page_offset = frags[nr].offset;
                skb->truesize += frag_info->frag_stride;
+               frags[nr].page = NULL;
        }
        /* Adjust size of last fragment to match actual length */
        if (nr > 0)
@@ -546,7 +573,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
        int index = ring->prod & ring->size_mask;
 
        while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-               if (mlx4_en_prepare_rx_desc(priv, ring, index))
+               if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
                        break;
                ring->prod++;
                index = ring->prod & ring->size_mask;
@@ -656,8 +683,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                 * - DIX Ethernet (type interpretation)
                                 * - TCP/IP (v4)
                                 * - without IP options
-                                * - not an IP fragment */
-                               if (dev->features & NETIF_F_GRO) {
+                                * - not an IP fragment
+                                * - no LLS polling in progress
+                                */
+                               if (!mlx4_en_cq_ll_polling(cq) &&
+                                   (dev->features & NETIF_F_GRO)) {
                                        struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
                                        if (!gro_skb)
                                                goto next;
@@ -737,6 +767,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                               timestamp);
                }
 
+               skb_mark_ll(skb, &cq->napi);
+
                /* Push it up the stack */
                netif_receive_skb(skb);
 
@@ -781,8 +813,13 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int done;
 
+       if (!mlx4_en_cq_lock_napi(cq))
+               return budget;
+
        done = mlx4_en_process_rx_cq(dev, cq, budget);
 
+       mlx4_en_cq_unlock_napi(cq);
+
        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget)
                INC_PERF_COUNTER(priv->pstats.napi_quota);
@@ -794,21 +831,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
        return done;
 }
 
-
-/* Calculate the last offset position that accommodates a full fragment
- * (assuming fagment size = stride-align) */
-static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
-{
-       u16 res = MLX4_EN_ALLOC_SIZE % stride;
-       u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
-
-       en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
-                           "res:%d offset:%d\n", stride, align, res, offset);
-       return offset;
-}
-
-
-static int frag_sizes[] = {
+static const int frag_sizes[] = {
        FRAG_SZ0,
        FRAG_SZ1,
        FRAG_SZ2,
@@ -836,9 +859,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                        priv->frag_info[i].frag_stride =
                                ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
                }
-               priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
-                                               priv, priv->frag_info[i].frag_stride,
-                                               priv->frag_info[i].frag_align);
                buf_size += priv->frag_info[i].frag_size;
                i++;
        }
@@ -850,13 +870,13 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
        en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
                  "num_frags:%d):\n", eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
-               en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
-                               "stride:%d last_offset:%d\n", i,
-                               priv->frag_info[i].frag_size,
-                               priv->frag_info[i].frag_prefix_size,
-                               priv->frag_info[i].frag_align,
-                               priv->frag_info[i].frag_stride,
-                               priv->frag_info[i].last_offset);
+               en_err(priv,
+                      "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
+                      i,
+                      priv->frag_info[i].frag_size,
+                      priv->frag_info[i].frag_prefix_size,
+                      priv->frag_info[i].frag_align,
+                      priv->frag_info[i].frag_stride);
        }
 }
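
mlx4_alloc_pages() above presets the page refcount to size / frag_stride, so every fragment carved from the compound page consumes one pre-charged reference and no per-fragment get_page() is needed. A sketch of that arithmetic (illustrative helper only):

	#include <linux/mm.h>

	static unsigned int example_frags_per_page(unsigned int order,
						   unsigned int frag_stride)
	{
		unsigned int size = PAGE_SIZE << order;

		/* e.g. order 2 (16384 bytes) with a 1536-byte stride -> 10 fragments */
		return size / frag_stride;
	}
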
 
index 4e6877a032a8414bd059e2dcf658616dc2cab0fe..7c492382da09937764e8c6bb9a835d1c05771021 100644 (file)
@@ -544,7 +544,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
        if (vlan_tx_tag_present(skb))
                up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
 
-       return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
+       return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
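
The queue selection above maps the value returned by __netdev_pick_tx() into the block of TX rings reserved for the skb's 802.1p priority: with rings_p_up rings per user priority, the ring index is value % rings_p_up + up * rings_p_up. A sketch of that index arithmetic (illustrative helper, not the driver's function):

	#include <linux/types.h>

	static u16 example_select_queue(u16 hash, u16 rings_p_up, u8 up)
	{
		/* e.g. hash 7, 4 rings per UP, priority 2 -> 7 % 4 + 2 * 4 = ring 11 */
		return hash % rings_p_up + up * rings_p_up;
	}
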
index 6000342f9725db0f29e8bac3a249765e53c90173..7e042869ef0cd2d9c5884147edd7b1c7d582b868 100644 (file)
@@ -448,6 +448,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
        int i;
        enum slave_port_gen_event gen_event;
        unsigned long flags;
+       struct mlx4_vport_state *s_info;
 
        while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
                /*
@@ -556,7 +557,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
                                                         " to slave: %d, port:%d\n",
                                                         __func__, i, port);
-                                               mlx4_slave_event(dev, i, eqe);
+                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+                                                       mlx4_slave_event(dev, i, eqe);
                                        } else {  /* IB port */
                                                set_and_calc_slave_port_state(dev, i, port,
                                                                              MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,7 +583,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
-                                               mlx4_slave_event(dev, i, eqe);
+                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+                                                       mlx4_slave_event(dev, i, eqe);
                                        }
                                else /* IB port */
                                        /* port-up event will be sent to a slave when the
index b147bdd40768802df3c11d935b1ed607b3e127c9..569bbe3e7403596b918d2c011d2c3c5448c5f6bb 100644 (file)
@@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [2] = "RSS XOR Hash Function support",
                [3] = "Device manage flow steering support",
                [4] = "Automatic MAC reassignment support",
-               [5] = "Time stamping support"
+               [5] = "Time stamping support",
+               [6] = "VST (control vlan insertion/stripping) support",
+               [7] = "FSM (MAC anti-spoofing) support"
        };
        int i;
 
@@ -828,8 +830,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
        u8 port_type;
        u16 short_field;
        int err;
+       int admin_link_state;
 
 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK        0xE0
+#define MLX4_PORT_LINK_UP_MASK         0x80
 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
 #define QUERY_PORT_CUR_MAX_GID_OFFSET  0x0e
 
@@ -838,12 +842,16 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                           MLX4_CMD_NATIVE);
 
        if (!err && dev->caps.function != slave) {
-               /* set slave default_mac address */
-               MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
-               def_mac += slave << 8;
                /* if config MAC in DB use it */
                if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
                        def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
+               else {
+                       /* set slave default_mac address */
+                       MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+                       def_mac += slave << 8;
+                       priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
+               }
+
                MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
 
                /* get port type - currently only eth is enabled */
@@ -855,6 +863,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                /* set port type to currently operating port type */
                port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
 
+               admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
+               if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
+                       port_type |= MLX4_PORT_LINK_UP_MASK;
+               else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
+                       port_type &= ~MLX4_PORT_LINK_UP_MASK;
+
                MLX4_PUT(outbox->buf, port_type,
                         QUERY_PORT_SUPPORTED_TYPE_OFFSET);
 
index 0d32a82458bfb0d10f95ebd6f914f05b6d5e4682..56160a2bb57ba9aee70155d8a80f008d04b3136b 100644 (file)
@@ -839,11 +839,11 @@ static ssize_t set_port_ib_mtu(struct device *dev,
                return -EINVAL;
        }
 
-       err = sscanf(buf, "%d", &mtu);
-       if (err > 0)
+       err = kstrtoint(buf, 0, &mtu);
+       if (!err)
                ibta_mtu = int_to_ibta_mtu(mtu);
 
-       if (err <= 0 || ibta_mtu < 0) {
+       if (err || ibta_mtu < 0) {
                mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
                return -EINVAL;
        }
@@ -1290,7 +1290,6 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 dma = (u64) priv->mfunc.vhcr_dma;
-       int num_of_reset_retries = NUM_OF_RESET_RETRIES;
        int ret_from_reset = 0;
        u32 slave_read;
        u32 cmd_channel_ver;
@@ -1304,18 +1303,10 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
         * NUM_OF_RESET_RETRIES times before leaving.*/
        if (ret_from_reset) {
                if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
-                       msleep(SLEEP_TIME_IN_RESET);
-                       while (ret_from_reset && num_of_reset_retries) {
-                               mlx4_warn(dev, "slave is currently in the"
-                                         "middle of FLR. retrying..."
-                                         "(try num:%d)\n",
-                                         (NUM_OF_RESET_RETRIES -
-                                          num_of_reset_retries  + 1));
-                               ret_from_reset =
-                                       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
-                                                     0, MLX4_COMM_TIME);
-                               num_of_reset_retries = num_of_reset_retries - 1;
-                       }
+                       mlx4_warn(dev, "slave is currently in the "
+                                 "middle of FLR. Deferring probe.\n");
+                       mutex_unlock(&priv->cmd.slave_cmd_mutex);
+                       return -EPROBE_DEFER;
                } else
                        goto err;
        }
@@ -1526,7 +1517,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        } else {
                err = mlx4_init_slave(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to initialize slave\n");
+                       if (err != -EPROBE_DEFER)
+                               mlx4_err(dev, "Failed to initialize slave\n");
                        return err;
                }
 
@@ -2085,6 +2077,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                       num_vfs, MLX4_MAX_NUM_VF);
                return -EINVAL;
        }
+
+       if (num_vfs < 0) {
+               pr_err("num_vfs module parameter cannot be negative\n");
+               return -EINVAL;
+       }
        /*
         * Check for BARs.
         */
index df15bb6631cc7d6f68b70191e891321f505fe00b..75272935a3f74e42c31bdd774e6aab5f0bcf1854 100644 (file)
@@ -482,6 +482,7 @@ struct mlx4_vport_state {
        u8  default_qos;
        u32 tx_rate;
        bool spoofchk;
+       u32 link_state;
 };
 
 struct mlx4_vf_admin_state {
index b1d7657b2bf559573015cea69a7e1b573453eb1e..35fb60e2320c2a896e8b241c2c9dbbe575a2dd02 100644 (file)
 
 /* Use the maximum between 16384 and a single page */
 #define MLX4_EN_ALLOC_SIZE     PAGE_ALIGN(16384)
-#define MLX4_EN_ALLOC_ORDER    get_order(MLX4_EN_ALLOC_SIZE)
 
-/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
+#define MLX4_EN_ALLOC_PREFER_ORDER     PAGE_ALLOC_COSTLY_ORDER
+
+/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
  * and 4K allocations) */
 enum {
-       FRAG_SZ0 = 512 - NET_IP_ALIGN,
-       FRAG_SZ1 = 1024,
+       FRAG_SZ0 = 1536 - NET_IP_ALIGN,
+       FRAG_SZ1 = 4096,
        FRAG_SZ2 = 4096,
        FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
 };
@@ -234,9 +235,10 @@ struct mlx4_en_tx_desc {
 #define MLX4_EN_CX3_HIGH_ID    0x1005
 
 struct mlx4_en_rx_alloc {
-       struct page *page;
-       dma_addr_t dma;
-       u16 offset;
+       struct page     *page;
+       dma_addr_t      dma;
+       u32             offset;
+       u32             size;
 };
 
 struct mlx4_en_tx_ring {
@@ -290,6 +292,11 @@ struct mlx4_en_rx_ring {
        void *rx_info;
        unsigned long bytes;
        unsigned long packets;
+#ifdef CONFIG_NET_LL_RX_POLL
+       unsigned long yields;
+       unsigned long misses;
+       unsigned long cleaned;
+#endif
        unsigned long csum_ok;
        unsigned long csum_none;
        int hwtstamp_rx_filter;
@@ -310,6 +317,19 @@ struct mlx4_en_cq {
        u16 moder_cnt;
        struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR   0x1e
+
+#ifdef CONFIG_NET_LL_RX_POLL
+       unsigned int state;
+#define MLX4_EN_CQ_STATE_IDLE        0
+#define MLX4_EN_CQ_STATE_NAPI     1    /* NAPI owns this CQ */
+#define MLX4_EN_CQ_STATE_POLL     2    /* poll owns this CQ */
+#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
+#define MLX4_EN_CQ_STATE_NAPI_YIELD  4    /* NAPI yielded this CQ */
+#define MLX4_EN_CQ_STATE_POLL_YIELD  8    /* poll yielded this CQ */
+#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
+#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
+       spinlock_t poll_lock; /* protects from LLS/napi conflicts */
+#endif  /* CONFIG_NET_LL_RX_POLL */
 };
 
 struct mlx4_en_port_profile {
@@ -421,8 +441,6 @@ struct mlx4_en_frag_info {
        u16 frag_prefix_size;
        u16 frag_stride;
        u16 frag_align;
-       u16 last_offset;
-
 };
 
 #ifdef CONFIG_MLX4_EN_DCB
@@ -562,6 +580,115 @@ struct mlx4_mac_entry {
        struct rcu_head rcu;
 };
 
+#ifdef CONFIG_NET_LL_RX_POLL
+static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
+{
+       spin_lock_init(&cq->poll_lock);
+       cq->state = MLX4_EN_CQ_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a cq */
+static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
+{
+       int rc = true;
+       spin_lock(&cq->poll_lock);
+       if (cq->state & MLX4_CQ_LOCKED) {
+               WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
+               cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
+               rc = false;
+       } else
+               /* we don't care if someone yielded */
+               cq->state = MLX4_EN_CQ_STATE_NAPI;
+       spin_unlock(&cq->poll_lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the cq while napi had it */
+static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
+{
+       int rc = false;
+       spin_lock(&cq->poll_lock);
+       WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
+                              MLX4_EN_CQ_STATE_NAPI_YIELD));
+
+       if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
+               rc = true;
+       cq->state = MLX4_EN_CQ_STATE_IDLE;
+       spin_unlock(&cq->poll_lock);
+       return rc;
+}
+
+/* called from mlx4_en_low_latency_poll() */
+static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
+{
+       int rc = true;
+       spin_lock_bh(&cq->poll_lock);
+       if ((cq->state & MLX4_CQ_LOCKED)) {
+               struct net_device *dev = cq->dev;
+               struct mlx4_en_priv *priv = netdev_priv(dev);
+               struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+
+               cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
+               rc = false;
+               rx_ring->yields++;
+       } else
+               /* preserve yield marks */
+               cq->state |= MLX4_EN_CQ_STATE_POLL;
+       spin_unlock_bh(&cq->poll_lock);
+       return rc;
+}
+
+/* returns true if someone tried to get the cq while it was locked */
+static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
+{
+       int rc = false;
+       spin_lock_bh(&cq->poll_lock);
+       WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));
+
+       if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
+               rc = true;
+       cq->state = MLX4_EN_CQ_STATE_IDLE;
+       spin_unlock_bh(&cq->poll_lock);
+       return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+{
+       WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
+       return cq->state & CQ_USER_PEND;
+}
+#else
+static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
+{
+}
+
+static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
+{
+       return true;
+}
+
+static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
+{
+       return false;
+}
+
+static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
+{
+       return false;
+}
+
+static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
+{
+       return false;
+}
+
+static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+{
+       return false;
+}
+#endif /* CONFIG_NET_LL_RX_POLL */
+
 #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
 void mlx4_en_update_loopback_state(struct net_device *dev,
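
The CQ ownership helpers added above form a small state machine so that the NAPI and low-latency (busy-poll) receive paths never process the same CQ concurrently. A rough, illustrative sketch of how they are meant to be paired; example_napi_poll() and example_process_rx_cq() are placeholder names, not code from this patch:

    static int example_process_rx_cq(struct mlx4_en_cq *cq, int budget);  /* assumed RX handler */

    static int example_napi_poll(struct mlx4_en_cq *cq, int budget)
    {
            int done;

            if (!mlx4_en_cq_lock_napi(cq))
                    return budget;          /* a busy-poller owns the CQ; stay scheduled */

            done = example_process_rx_cq(cq, budget);

            if (mlx4_en_cq_unlock_napi(cq))
                    done = budget;          /* someone polled while NAPI held it; keep polling */

            return done;
    }

    /* The busy-poll side brackets its work with mlx4_en_cq_lock_poll() /
     * mlx4_en_cq_unlock_poll() in the same way, bumping rx_ring->yields
     * when it loses the race. */
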
index e12e0d2e0ee00c3fad03c1c012707c55d6f21a88..1157f028a90f341a50c300c40ba8e967d9fb2e01 100644 (file)
@@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                if (MLX4_QP_ST_RC == qp_type)
                        return -EINVAL;
 
+               /* force strip vlan by clear vsd */
+               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+               if (0 != vp_oper->state.default_vlan) {
+                       qpc->pri_path.vlan_control =
+                               MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+                               MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+                               MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+               } else { /* priority tagged */
+                       qpc->pri_path.vlan_control =
+                               MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+                               MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
+               }
+
+               qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
-               qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/
-               qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
+               qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+               qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
-               mlx4_dbg(dev, "qp %d  port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
-                        be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-                        (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
-                        vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
-                        (int)(qpc->pri_path.fl));
        }
        if (vp_oper->state.spoofchk) {
-               qpc->pri_path.feup |= 1 << 5; /* set fsm bit */;
+               qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
-               mlx4_dbg(dev, "spoof qp %d  port %d feup  0x%x, myLmc 0x%x mindx %d\n",
-                        be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-                        (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
-                        vp_oper->mac_idx);
        }
        return 0;
 }
index fe42fc00d8d314d6011d621c23c64370b93fe304..d16b11ed2e5227a75f5a183446136f81de855281 100644 (file)
@@ -22,7 +22,6 @@ if NET_VENDOR_MICREL
 config ARM_KS8695_ETHER
        tristate "KS8695 Ethernet support"
        depends on ARM && ARCH_KS8695
-       select NET_CORE
        select MII
        ---help---
          If you wish to compile a kernel for the KS8695 and want to
@@ -39,7 +38,6 @@ config KS8842
 config KS8851
        tristate "Micrel KS8851 SPI"
        depends on SPI
-       select NET_CORE
        select MII
        select CRC32
        select EEPROM_93CX6
@@ -49,7 +47,6 @@ config KS8851
 config KS8851_MLL
        tristate "Micrel KS8851 MLL"
        depends on HAS_IOMEM
-       select NET_CORE
        select MII
        ---help---
          This platform driver is for Micrel KS8851 Address/data bus
@@ -58,7 +55,6 @@ config KS8851_MLL
 config KSZ884X_PCI
        tristate "Micrel KSZ8841/2 PCI"
        depends on PCI
-       select NET_CORE
        select MII
        select CRC32
        ---help---
index b6c60fdef4ff6d1d5498e1bfb688b4edfa7ad8bd..106eb972f2acade4f52ebdd6ceed74f6cac7c5bd 100644 (file)
@@ -1600,7 +1600,6 @@ ks8695_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct ks8695_priv *ksp = netdev_priv(ndev);
 
-       platform_set_drvdata(pdev, NULL);
        netif_napi_del(&ksp->napi);
 
        unregister_netdev(ndev);
index fbcb9e74d7fc598f1fe52951a087603f58e11efc..e393d998be89dd2128fccaef65efd36a92c09d26 100644 (file)
@@ -1250,7 +1250,6 @@ static int ks8842_remove(struct platform_device *pdev)
        iounmap(adapter->hw_addr);
        free_netdev(netdev);
        release_mem_region(iomem->start, resource_size(iomem));
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index ddaf138ce0d4aa79344584f938dad12047a73c97..ac20098b542a37697f8e78989428653d6d29ba67 100644 (file)
@@ -35,6 +35,9 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/ks8851_mll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
 
 #define        DRV_NAME        "ks8851_mll"
 
@@ -1524,6 +1527,13 @@ static int ks_hw_init(struct ks_net *ks)
        return true;
 }
 
+#if defined(CONFIG_OF)
+static const struct of_device_id ks8851_ml_dt_ids[] = {
+       { .compatible = "micrel,ks8851-mll" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
+#endif
 
 static int ks8851_probe(struct platform_device *pdev)
 {
@@ -1532,7 +1542,7 @@ static int ks8851_probe(struct platform_device *pdev)
        struct net_device *netdev;
        struct ks_net *ks;
        u16 id, data;
-       struct ks8851_mll_platform_data *pdata;
+       const char *mac;
 
        io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1619,13 +1629,21 @@ static int ks8851_probe(struct platform_device *pdev)
        ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
 
        /* overwriting the default MAC address */
-       pdata = pdev->dev.platform_data;
-       if (!pdata) {
-               netdev_err(netdev, "No platform data\n");
-               err = -ENODEV;
-               goto err_pdata;
+       if (pdev->dev.of_node) {
+               mac = of_get_mac_address(pdev->dev.of_node);
+               if (mac)
+                       memcpy(ks->mac_addr, mac, ETH_ALEN);
+       } else {
+               struct ks8851_mll_platform_data *pdata;
+
+               pdata = pdev->dev.platform_data;
+               if (!pdata) {
+                       netdev_err(netdev, "No platform data\n");
+                       err = -ENODEV;
+                       goto err_pdata;
+               }
+               memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
        }
-       memcpy(ks->mac_addr, pdata->mac_addr, 6);
        if (!is_valid_ether_addr(ks->mac_addr)) {
                /* Use random MAC address if none passed */
                eth_random_addr(ks->mac_addr);
@@ -1671,7 +1689,6 @@ static int ks8851_remove(struct platform_device *pdev)
        iounmap(ks->hw_addr);
        free_netdev(netdev);
        release_mem_region(iomem->start, resource_size(iomem));
-       platform_set_drvdata(pdev, NULL);
        return 0;
 
 }
@@ -1680,6 +1697,7 @@ static struct platform_driver ks8851_platform_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(ks8851_ml_dt_ids),
        },
        .probe = ks8851_probe,
        .remove = ks8851_remove,
index cb9e6383150043e3186bfc1c51ee8f334fb090c0..dc2c6f561e9ade6175fb72bf187db05e528d17f7 100644 (file)
@@ -422,7 +422,6 @@ exit_free_pfifo:
 exit_free_xc:
        free_xc(priv->xc);
 exit_free_netdev:
-       platform_set_drvdata(pdev, NULL);
        free_netdev(ndev);
 exit:
        return ret;
@@ -430,11 +429,9 @@ exit:
 
 static int netx_eth_drv_remove(struct platform_device *pdev)
 {
-       struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
        struct netx_eth_priv *priv = netdev_priv(ndev);
 
-       platform_set_drvdata(pdev, NULL);
-
        unregister_netdev(ndev);
        xc_stop(priv->xc);
        free_xc(priv->xc);
index 334c17183095eda5ff5d0775171ecf0a46636c5a..01182b559473cc4812909bccdcbf78cb81de08ce 100644 (file)
@@ -22,7 +22,6 @@ config W90P910_ETH
        tristate "Nuvoton w90p910 Ethernet support"
        depends on ARM && ARCH_W90X900
        select PHYLIB
-       select NET_CORE
        select MII
        ---help---
          Say Y here if you want to use built-in Ethernet ports
index 3df8287b7452d77b69bee0262a9f950125020b8e..e88bdb1aa669c1f60c9b161f2b81fb10150c7777 100644 (file)
@@ -1051,7 +1051,6 @@ failed_put_clk:
        clk_put(ether->clk);
 failed_free_rxirq:
        free_irq(ether->rxirq, pdev);
-       platform_set_drvdata(pdev, NULL);
 failed_free_txirq:
        free_irq(ether->txirq, pdev);
 failed_free_io:
@@ -1080,7 +1079,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
        free_irq(ether->rxirq, dev);
 
        del_timer_sync(&ether->check_timer);
-       platform_set_drvdata(pdev, NULL);
 
        free_netdev(dev);
        return 0;
index b003fe53c8e2508e3de8f2c2a118060b57ac02ac..098b96dad66f901582c9d147416f7e2a2448d861 100644 (file)
@@ -6340,7 +6340,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
        {0,},
 };
 
-static struct pci_driver driver = {
+static struct pci_driver forcedeth_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = pci_tbl,
        .probe          = nv_probe,
@@ -6349,16 +6349,6 @@ static struct pci_driver driver = {
        .driver.pm      = NV_PM_OPS,
 };
 
-static int __init init_nic(void)
-{
-       return pci_register_driver(&driver);
-}
-
-static void __exit exit_nic(void)
-{
-       pci_unregister_driver(&driver);
-}
-
 module_param(max_interrupt_work, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
 module_param(optimization_mode, int, 0);
@@ -6379,11 +6369,8 @@ module_param(debug_tx_timeout, bool, 0);
 MODULE_PARM_DESC(debug_tx_timeout,
                 "Dump tx related registers and ring when tx_timeout happens");
 
+module_pci_driver(forcedeth_pci_driver);
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
 MODULE_LICENSE("GPL");
-
 MODULE_DEVICE_TABLE(pci, pci_tbl);
-
-module_init(init_nic);
-module_exit(exit_nic);
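
module_pci_driver() generates the init/exit boilerplate that the removed init_nic()/exit_nic() pair provided by hand. Roughly, the macro expands to the equivalent of the following (illustrative expansion, not text from this patch):

    static int __init forcedeth_pci_driver_init(void)
    {
            return pci_register_driver(&forcedeth_pci_driver);
    }
    module_init(forcedeth_pci_driver_init);

    static void __exit forcedeth_pci_driver_exit(void)
    {
            pci_unregister_driver(&forcedeth_pci_driver);
    }
    module_exit(forcedeth_pci_driver_exit);
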
index 55a5548d6add50d43202cc74b46acd37934e556a..a061b93efe66a29fd663a49bf16d1a510ff0879f 100644 (file)
@@ -1483,7 +1483,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        return 0;
 
 err_out_unregister_netdev:
-       platform_set_drvdata(pdev, NULL);
        unregister_netdev(ndev);
 err_out_dma_unmap:
        if (!use_iram_for_net(&pldat->pdev->dev) ||
@@ -1511,7 +1510,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
        struct netdata_local *pldat = netdev_priv(ndev);
 
        unregister_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
 
        if (!use_iram_for_net(&pldat->pdev->dev) ||
            pldat->dma_buff_size > lpc32xx_return_iram_size())
index 921729f9c85c53baa17cd334f4a7b8f35f590561..e6e029237a63895a0726c0b442d0a6f59728d389 100644 (file)
@@ -1437,7 +1437,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       dev_set_drvdata(&pdev->dev, netdev);
+       platform_set_drvdata(pdev, netdev);
        p = netdev_priv(netdev);
        netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                       OCTEON_MGMT_NAPI_WEIGHT);
@@ -1559,7 +1559,7 @@ err:
 
 static int octeon_mgmt_remove(struct platform_device *pdev)
 {
-       struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+       struct net_device *netdev = platform_get_drvdata(pdev);
 
        unregister_netdev(netdev);
        free_netdev(netdev);
index 34d05bf72b2e0e403e3568d7eb2193535d8fda6c..cb22341a14a8c03fd7627c0d2d2a31f9f68ce126 100644 (file)
@@ -5,7 +5,6 @@
 config PCH_GBE
        tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
        depends on PCI
-       select NET_CORE
        select MII
        select PTP_1588_CLOCK_PCH
        ---help---
index cbbeca3f8c5c16c3acd5993601c271055fa94c8c..8d5180043c7001986a3185442743f6a3dbdb44b7 100644 (file)
@@ -21,7 +21,6 @@ if NET_PACKET_ENGINE
 config HAMACHI
        tristate "Packet Engines Hamachi GNIC-II support"
        depends on PCI
-       select NET_CORE
        select MII
        ---help---
          If you have a Gigabit Ethernet card of this type, say Y and read
index 322a36b76727d3522bb8bb1a35440d18ee21b696..3fe09ab2d7c9f368a53188bc7645d212c0d6c53e 100644 (file)
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 80
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.80"
+#define _NETXEN_NIC_LINUX_SUBVERSION 81
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.81"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
@@ -1855,7 +1855,7 @@ static const struct netxen_brdinfo netxen_boards[] = {
 
 #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
 
-static inline void get_brd_name_by_type(u32 type, char *name)
+static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name)
 {
        int i, found = 0;
        for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
@@ -1864,10 +1864,14 @@ static inline void get_brd_name_by_type(u32 type, char *name)
                        found = 1;
                        break;
                }
+       }
 
+       if (!found) {
+               strcpy(name, "Unknown");
+               return -EINVAL;
        }
-       if (!found)
-               name = "Unknown";
+
+       return 0;
 }
 
 static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
index 28e076960bcb5c8400e13eb62ac8d29c7d2f0841..32c790659f9c1ba0410614c30818eb9cf20a46ee 100644 (file)
@@ -734,6 +734,9 @@ enum {
 #define NIC_CRB_BASE_2         (NETXEN_CAM_RAM(0x700))
 #define NETXEN_NIC_REG(X)      (NIC_CRB_BASE+(X))
 #define NETXEN_NIC_REG_2(X)    (NIC_CRB_BASE_2+(X))
+#define NETXEN_INTR_MODE_REG   NETXEN_NIC_REG(0x44)
+#define NETXEN_MSI_MODE                0x1
+#define NETXEN_INTX_MODE       0x2
 
 #define NX_CDRP_CRB_OFFSET             (NETXEN_NIC_REG(0x18))
 #define NX_ARG1_CRB_OFFSET             (NETXEN_NIC_REG(0x1c))
index af951f343ff6d1396589ffbbf462aa2c7a80c776..c401b0b4353d94d543e357c16c184c8771eafe7a 100644 (file)
@@ -592,48 +592,60 @@ static const struct net_device_ops netxen_netdev_ops = {
 #endif
 };
 
-static void
-netxen_setup_intr(struct netxen_adapter *adapter)
+static inline bool netxen_function_zero(struct pci_dev *pdev)
 {
-       struct netxen_legacy_intr_set *legacy_intrp;
-       struct pci_dev *pdev = adapter->pdev;
-       int err, num_msix;
+       return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
+}
 
-       if (adapter->rss_supported) {
-               num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
-                       MSIX_ENTRIES_PER_ADAPTER : 2;
-       } else
-               num_msix = 1;
+static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
+                                            u32 mode)
+{
+       NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
+}
 
-       adapter->max_sds_rings = 1;
+static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
+{
+       return NXRD32(adapter, NETXEN_INTR_MODE_REG);
+}
 
-       adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+static void
+netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
+{
+       struct netxen_legacy_intr_set *legacy_intrp;
+       u32 tgt_status_reg, int_state_reg;
 
        if (adapter->ahw.revision_id >= NX_P3_B0)
                legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
        else
                legacy_intrp = &legacy_intr[0];
 
+       tgt_status_reg = legacy_intrp->tgt_status_reg;
+       int_state_reg = ISR_INT_STATE_REG;
+
        adapter->int_vec_bit = legacy_intrp->int_vec_bit;
-       adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
-                       legacy_intrp->tgt_status_reg);
+       adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
        adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
-                       legacy_intrp->tgt_mask_reg);
+                                                 legacy_intrp->tgt_mask_reg);
        adapter->pci_int_reg = netxen_get_ioaddr(adapter,
-                       legacy_intrp->pci_int_reg);
+                                                legacy_intrp->pci_int_reg);
        adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
 
        if (adapter->ahw.revision_id >= NX_P3_B1)
                adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
-                       ISR_INT_STATE_REG);
+                                                              int_state_reg);
        else
                adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
-                       CRB_INT_VECTOR);
+                                                              CRB_INT_VECTOR);
+}
 
-       netxen_set_msix_bit(pdev, 0);
+static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
+                                      int num_msix)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       u32 value;
+       int err;
 
        if (adapter->msix_supported) {
-
                netxen_init_msix_entries(adapter, num_msix);
                err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
                if (err == 0) {
@@ -644,26 +656,59 @@ netxen_setup_intr(struct netxen_adapter *adapter)
                                adapter->max_sds_rings = num_msix;
 
                        dev_info(&pdev->dev, "using msi-x interrupts\n");
-                       return;
+                       return 0;
                }
-
-               if (err > 0)
-                       pci_disable_msix(pdev);
-
                /* fall through for msi */
        }
 
        if (use_msi && !pci_enable_msi(pdev)) {
+               value = msi_tgt_status[adapter->ahw.pci_func];
                adapter->flags |= NETXEN_NIC_MSI_ENABLED;
-               adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
-                               msi_tgt_status[adapter->ahw.pci_func]);
-               dev_info(&pdev->dev, "using msi interrupts\n");
+               adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
                adapter->msix_entries[0].vector = pdev->irq;
-               return;
+               dev_info(&pdev->dev, "using msi interrupts\n");
+               return 0;
        }
 
-       dev_info(&pdev->dev, "using legacy interrupts\n");
-       adapter->msix_entries[0].vector = pdev->irq;
+       dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
+       return -EIO;
+}
+
+static int netxen_setup_intr(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       int num_msix;
+
+       if (adapter->rss_supported)
+               num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+                           MSIX_ENTRIES_PER_ADAPTER : 2;
+       else
+               num_msix = 1;
+
+       adapter->max_sds_rings = 1;
+       adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+       netxen_initialize_interrupt_registers(adapter);
+       netxen_set_msix_bit(pdev, 0);
+
+       if (netxen_function_zero(pdev)) {
+               if (!netxen_setup_msi_interrupts(adapter, num_msix))
+                       netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
+               else
+                       netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
+       } else {
+               if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
+                   netxen_setup_msi_interrupts(adapter, num_msix)) {
+                       dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
+                       return -EIO;
+               }
+       }
+
+       if (!NETXEN_IS_MSI_FAMILY(adapter)) {
+               adapter->msix_entries[0].vector = pdev->irq;
+               dev_info(&pdev->dev, "using legacy interrupts\n");
+       }
+       return 0;
 }
 
 static void
@@ -841,7 +886,9 @@ netxen_check_options(struct netxen_adapter *adapter)
        }
 
        if (adapter->portnum == 0) {
-               get_brd_name_by_type(adapter->ahw.board_type, brd_name);
+               if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
+                                                   brd_name))
+                       strcpy(serial_num, "Unknown");
 
                pr_info("%s: %s Board S/N %s  Chip rev 0x%x\n",
                                module_name(THIS_MODULE),
@@ -860,9 +907,9 @@ netxen_check_options(struct netxen_adapter *adapter)
                adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
        }
 
-       dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
-                       fw_major, fw_minor, fw_build,
-                       adapter->ahw.cut_through ? "cut-through" : "legacy");
+       dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
+                NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
+                adapter->ahw.cut_through ? "cut-through" : "legacy");
 
        if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
                adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
@@ -1508,7 +1555,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netxen_nic_clear_stats(adapter);
 
-       netxen_setup_intr(adapter);
+       err = netxen_setup_intr(adapter);
+
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to setup interrupts, error = %d\n", err);
+               goto err_out_disable_msi;
+       }
 
        err = netxen_setup_netdev(adapter, netdev);
        if (err)
@@ -1596,7 +1649,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
        clear_bit(__NX_RESETTING, &adapter->state);
 
        netxen_teardown_intr(adapter);
-
+       netxen_set_interrupt_mode(adapter, 0);
        netxen_remove_diag_entries(adapter);
 
        netxen_cleanup_pci_map(adapter);
@@ -2721,7 +2774,7 @@ netxen_store_bridged_mode(struct device *dev,
        if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
                goto err_out;
 
-       if (strict_strtoul(buf, 2, &new))
+       if (kstrtoul(buf, 2, &new))
                goto err_out;
 
        if (!netxen_config_bridged_mode(adapter, !!new))
@@ -2760,7 +2813,7 @@ netxen_store_diag_mode(struct device *dev,
        struct netxen_adapter *adapter = dev_get_drvdata(dev);
        unsigned long new;
 
-       if (strict_strtoul(buf, 2, &new))
+       if (kstrtoul(buf, 2, &new))
                return -EINVAL;
 
        if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
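
Both sysfs store hunks above replace the deprecated strict_strtoul() with kstrtoul(), which takes the same three arguments and returns 0 on success or a negative errno. A minimal illustrative call, where buf stands for the sysfs input buffer:

    unsigned long val;

    /* parse buf in base 2; a non-zero return means the input could not be parsed */
    if (kstrtoul(buf, 2, &val))
            return -EINVAL;
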
@@ -3311,7 +3364,7 @@ static int netxen_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
 {
        struct netxen_adapter *adapter;
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net_device *orig_dev = dev;
        struct net_device *slave;
 
index 90c253b145eff8b9ae622877933f101cdf70b633..b00cf5665eabee735b1e218e6aeda7111380fe69 100644 (file)
@@ -38,8 +38,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 2
-#define _QLCNIC_LINUX_SUBVERSION 42
-#define QLCNIC_LINUX_VERSIONID  "5.2.42"
+#define _QLCNIC_LINUX_SUBVERSION 44
+#define QLCNIC_LINUX_VERSIONID  "5.2.44"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -303,7 +303,6 @@ extern int qlcnic_use_msi;
 extern int qlcnic_use_msi_x;
 extern int qlcnic_auto_fw_reset;
 extern int qlcnic_load_fw_file;
-extern int qlcnic_config_npars;
 
 /* Number of status descriptors to handle per interrupt */
 #define MAX_STATUS_HANDLE      (64)
@@ -394,6 +393,9 @@ struct qlcnic_fw_dump {
        u32     size;   /* total size of the dump */
        void    *data;  /* dump data area */
        struct  qlcnic_dump_template_hdr *tmpl_hdr;
+       dma_addr_t phys_addr;
+       void    *dma_buffer;
+       bool    use_pex_dma;
 };
 
 /*
@@ -427,8 +429,10 @@ struct qlcnic_hardware_context {
        u8 nic_mode;
        char diag_cnt;
 
+       u16 max_uc_count;
        u16 port_type;
        u16 board_type;
+       u16 supported_type;
 
        u16 link_speed;
        u16 link_duplex;
@@ -442,9 +446,10 @@ struct qlcnic_hardware_context {
        u16 max_mtu;
        u32 msg_enable;
        u16 act_pci_func;
+       u16 max_pci_func;
 
        u32 capabilities;
-       u32 capabilities2;
+       u32 extra_capability[3];
        u32 temp;
        u32 int_vec_bit;
        u32 fw_hal_version;
@@ -814,7 +819,8 @@ struct qlcnic_mac_list_s {
 
 #define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
 #define QLCNIC_FW_CAP2_HW_LRO_IPV6             BIT_3
-#define QLCNIC_FW_CAPABILITY_2_OCBB            BIT_5
+#define QLCNIC_FW_CAPABILITY_SET_DRV_VER       BIT_5
+#define QLCNIC_FW_CAPABILITY_2_BEACON          BIT_7
 
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT                   1
@@ -906,8 +912,14 @@ struct qlcnic_ipaddr {
 #define QLCNIC_FW_HANG                 0x4000
 #define QLCNIC_FW_LRO_MSS_CAP          0x8000
 #define QLCNIC_TX_INTR_SHARED          0x10000
+#define QLCNIC_APP_CHANGED_FLAGS       0x20000
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter)  \
+       ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
+#define QLCNIC_BEACON_EANBLE           0xC
+#define QLCNIC_BEACON_DISABLE          0xD
 
 #define QLCNIC_DEF_NUM_STS_DESC_RINGS  4
 #define QLCNIC_MSIX_TBL_SPACE          8192
@@ -928,6 +940,7 @@ struct qlcnic_ipaddr {
 #define __QLCNIC_SRIOV_ENABLE          10
 #define __QLCNIC_SRIOV_CAPABLE         11
 #define __QLCNIC_MBX_POLL_ENABLE       12
+#define __QLCNIC_DIAG_MODE             13
 
 #define QLCNIC_INTERRUPT_TEST          1
 #define QLCNIC_LOOPBACK_TEST           2
@@ -1033,6 +1046,7 @@ struct qlcnic_adapter {
        spinlock_t rx_mac_learn_lock;
        u32 file_prd_off;       /*File fw product offset*/
        u32 fw_version;
+       u32 offload_flags;
        const struct firmware *fw;
 };
 
@@ -1462,7 +1476,7 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
 void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
 
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
-int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
 int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
 netdev_features_t qlcnic_fix_features(struct net_device *netdev,
        netdev_features_t features);
@@ -1484,7 +1498,9 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
 int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
 int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
+void qlcnic_set_drv_version(struct qlcnic_adapter *);
 
 /*  eSwitch management functions */
 int qlcnic_config_switch_port(struct qlcnic_adapter *,
@@ -1514,6 +1530,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1537,9 +1554,12 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
 int qlcnic_reset_npar_config(struct qlcnic_adapter *);
 int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
 void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
+int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
 int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
 int qlcnic_read_mac_addr(struct qlcnic_adapter *);
 int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+                               struct qlcnic_esw_func_cfg *);
 void qlcnic_sriov_vf_schedule_multi(struct net_device *);
 void qlcnic_vf_add_mc_list(struct net_device *, u16);
 
@@ -1576,6 +1596,8 @@ struct qlcnic_nic_template {
        void (*napi_del)(struct qlcnic_adapter *);
        void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
        irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+       int (*shutdown)(struct pci_dev *);
+       int (*resume)(struct qlcnic_adapter *);
 };
 
 /* Adapter hardware abstraction */
@@ -1617,6 +1639,7 @@ struct qlcnic_hardware_ops {
        int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
        void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
        int (*get_board_info) (struct qlcnic_adapter *);
+       void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
 };
 
@@ -1779,6 +1802,18 @@ static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
        adapter->ahw->hw_ops->napi_enable(adapter);
 }
 
+static inline int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+       return adapter->nic_ops->shutdown(pdev);
+}
+
+static inline int __qlcnic_resume(struct qlcnic_adapter *adapter)
+{
+       return adapter->nic_ops->resume(adapter);
+}
+
 static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
 {
        adapter->ahw->hw_ops->napi_disable(adapter);
@@ -1832,6 +1867,11 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
        return adapter->ahw->hw_ops->free_mac_list(adapter);
 }
 
+static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+       adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+}
+
 static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
                                            u32 key)
 {
@@ -1878,6 +1918,21 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
                writel(0xfbff, adapter->tgt_mask_reg);
 }
 
+static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter)
+{
+       return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter)
+{
+       clear_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter)
+{
+       return test_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
 extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
 extern const struct ethtool_ops qlcnic_ethtool_ops;
 extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
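
The new __QLCNIC_DIAG_MODE bit and its helpers give diagnostics paths a simple try-lock: qlcnic_get_diag_lock() returns non-zero when another context already holds diagnostics mode. A short illustrative pairing, with the diagnostic body as a placeholder (the later loopback-test hunks use the same shape):

    if (qlcnic_get_diag_lock(adapter))
            return -EBUSY;                  /* already in diagnostics mode */

    example_run_diagnostic(adapter);        /* hypothetical diagnostic work */

    qlcnic_release_diag_lock(adapter);
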
index ea790a93ee7c0e79acedd062a1d1795fb413e8c4..0913c623a67efd238e7d989d50cb2b1c7238cefa 100644 (file)
@@ -63,6 +63,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
        {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
        {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+       {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1},
        {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
        {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
        {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
@@ -172,6 +173,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
        .change_l2_filter               = qlcnic_83xx_change_l2_filter,
        .get_board_info                 = qlcnic_83xx_get_port_info,
+       .set_mac_filter_count           = qlcnic_83xx_set_mac_filter_count,
        .free_mac_list                  = qlcnic_82xx_free_mac_list,
 };
 
@@ -184,6 +186,8 @@ static struct qlcnic_nic_template qlcnic_83xx_ops = {
        .napi_del               = qlcnic_83xx_napi_del,
        .config_ipaddr          = qlcnic_83xx_config_ipaddr,
        .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
+       .shutdown               = qlcnic_83xx_shutdown,
+       .resume                 = qlcnic_83xx_resume,
 };
 
 void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
@@ -312,6 +316,11 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
        writel(0, adapter->tgt_mask_reg);
 }
 
+inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+       writel(1, adapter->tgt_mask_reg);
+}
+
 /* Enable MSI-x and INT-x interrupts */
 void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
                             struct qlcnic_host_sds_ring *sds_ring)
@@ -458,6 +467,9 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
 {
        u32 num_msix;
 
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+               qlcnic_83xx_set_legacy_intr_mask(adapter);
+
        qlcnic_83xx_disable_mbx_intr(adapter);
 
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -474,7 +486,6 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
 {
        irq_handler_t handler;
        u32 val;
-       char name[32];
        int err = 0;
        unsigned long flags = 0;
 
@@ -485,9 +496,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
        if (adapter->flags & QLCNIC_MSIX_ENABLED) {
                handler = qlcnic_83xx_handle_aen;
                val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
-               snprintf(name, (IFNAMSIZ + 4),
-                        "%s[%s]", "qlcnic", "aen");
-               err = request_irq(val, handler, flags, name, adapter);
+               err = request_irq(val, handler, flags, "qlcnic-MB", adapter);
                if (err) {
                        dev_err(&adapter->pdev->dev,
                                "failed to register MBX interrupt\n");
@@ -604,6 +613,22 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
        return status;
 }
 
+void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u16 act_pci_fn = ahw->act_pci_func;
+       u16 count;
+
+       ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
+       if (act_pci_fn <= 2)
+               count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) /
+                        act_pci_fn;
+       else
+               count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) /
+                        act_pci_fn;
+       ahw->max_uc_count = count;
+}
+
 void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
 {
        u32 val;
@@ -696,15 +721,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
        return 1;
 }
 
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
 {
        u32 data;
-       unsigned long wait_time = 0;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        /* wait for mailbox completion */
        do {
                data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-               if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+               if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
                        data = QLCNIC_RCODE_TIMEOUT;
                        break;
                }
@@ -720,8 +744,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
        u16 opcode;
        u8 mbx_err_code;
        unsigned long flags;
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
 
        opcode = LSW(cmd->req.arg[0]);
        if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
@@ -754,15 +778,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
        /* Signal FW about the impending command */
        QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 poll:
-       rsp = qlcnic_83xx_mbx_poll(adapter);
+       rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
        if (rsp != QLCNIC_RCODE_TIMEOUT) {
                /* Get the FW response data */
                fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
                if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
-                       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-                       if (mbx_val)
-                               goto poll;
+                       goto poll;
                }
                mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
                rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -842,7 +864,9 @@ void qlcnic_83xx_idc_aen_work(struct work_struct *work)
        int i, err = 0;
 
        adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+       if (err)
+               return;
 
        for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
                cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
@@ -1083,8 +1107,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
                cap |= QLC_83XX_FW_CAP_LRO_MSS;
 
        /* set mailbox hdr and capabilities */
-       qlcnic_alloc_mbx_args(&cmd, adapter,
-                             QLCNIC_CMD_CREATE_RX_CTX);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CREATE_RX_CTX);
+       if (err)
+               return err;
 
        if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
                cmd.req.arg[0] |= (0x3 << 29);
@@ -1242,7 +1268,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
                mbx.intr_id = 0xffff;
        mbx.src = 0;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+       if (err)
+               return err;
 
        if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
                cmd.req.arg[0] |= (0x3 << 29);
@@ -1276,11 +1304,13 @@ out:
        return err;
 }
 
-static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+                                     int num_sds_ring)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_rds_ring *rds_ring;
+       u16 adapter_state = adapter->is_up;
        u8 ring;
        int ret;
 
@@ -1304,6 +1334,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
        ret = qlcnic_fw_create_ctx(adapter);
        if (ret) {
                qlcnic_detach(adapter);
+               if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+                       adapter->max_sds_rings = num_sds_ring;
+                       qlcnic_attach(adapter);
+               }
                netif_device_attach(netdev);
                return ret;
        }
@@ -1382,8 +1416,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
 
        if (state) {
                /* Get LED configuration */
-               qlcnic_alloc_mbx_args(&cmd, adapter,
-                                     QLCNIC_CMD_GET_LED_CONFIG);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_GET_LED_CONFIG);
+               if (status)
+                       return status;
+
                status = qlcnic_issue_cmd(adapter, &cmd);
                if (status) {
                        dev_err(&adapter->pdev->dev,
@@ -1397,8 +1434,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
                /* Set LED Configuration */
                mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
                          LSW(QLC_83XX_LED_CONFIG);
-               qlcnic_alloc_mbx_args(&cmd, adapter,
-                                     QLCNIC_CMD_SET_LED_CONFIG);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_SET_LED_CONFIG);
+               if (status)
+                       return status;
+
                cmd.req.arg[1] = mbx_in;
                cmd.req.arg[2] = mbx_in;
                cmd.req.arg[3] = mbx_in;
@@ -1415,8 +1455,11 @@ mbx_err:
 
        } else {
                /* Restoring default LED configuration */
-               qlcnic_alloc_mbx_args(&cmd, adapter,
-                                     QLCNIC_CMD_SET_LED_CONFIG);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_SET_LED_CONFIG);
+               if (status)
+                       return status;
+
                cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
                cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
                cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
@@ -1486,10 +1529,18 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
                return;
 
        if (enable) {
-               qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_INIT_NIC_FUNC);
+               if (status)
+                       return;
+
                cmd.req.arg[1] = BIT_0 | BIT_31;
        } else {
-               qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_STOP_NIC_FUNC);
+               if (status)
+                       return;
+
                cmd.req.arg[1] = BIT_0 | BIT_31;
        }
        status = qlcnic_issue_cmd(adapter, &cmd);
@@ -1506,7 +1557,10 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
        struct qlcnic_cmd_args cmd;
        int err;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = adapter->ahw->port_config;
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err)
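
Every mailbox hunk in this file applies the same fix: check the return value of qlcnic_alloc_mbx_args() before touching the command structure. A generic sketch of the converted shape; example_mbx_command() and the argument value are placeholders, while the three qlcnic_* calls are the ones visible in the hunks:

    static int example_mbx_command(struct qlcnic_adapter *adapter, u32 arg)
    {
            struct qlcnic_cmd_args cmd;
            int err;

            err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
            if (err)
                    return err;             /* bail out before using cmd */

            cmd.req.arg[1] = arg;
            err = qlcnic_issue_cmd(adapter, &cmd);
            qlcnic_free_mbx_args(&cmd);
            return err;
    }
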
@@ -1520,7 +1574,10 @@ int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
        struct qlcnic_cmd_args cmd;
        int err;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+       if (err)
+               return err;
+
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err)
                dev_info(&adapter->pdev->dev, "Get Port config failed\n");
@@ -1536,7 +1593,10 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
        u32 temp;
        struct qlcnic_cmd_args cmd;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+       if (err)
+               return err;
+
        temp = adapter->recv_ctx->context_id << 16;
        cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
        err = qlcnic_issue_cmd(adapter, &cmd);
@@ -1567,7 +1627,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+       if (err)
+               return err;
+
        qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
        cmd.req.arg[1] = (mode ? 1 : 0) | temp;
        err = qlcnic_issue_cmd(adapter, &cmd);
@@ -1585,18 +1649,27 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
 
-       QLCDB(adapter, DRV, "%s loopback test in progress\n",
-             mode == QLCNIC_ILB_MODE ? "internal" : "external");
        if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
-               dev_warn(&adapter->pdev->dev,
-                        "Loopback test not supported for non privilege function\n");
+               netdev_warn(netdev,
+                           "Loopback test not supported in non privileged mode\n");
                return ret;
        }
 
-       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               netdev_info(netdev, "Device is resetting\n");
                return -EBUSY;
+       }
+
+       if (qlcnic_get_diag_lock(adapter)) {
+               netdev_info(netdev, "Device is in diagnostics mode\n");
+               return -EBUSY;
+       }
 
-       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+       netdev_info(netdev, "%s loopback test in progress\n",
+                   mode == QLCNIC_ILB_MODE ? "internal" : "external");
+
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+                                        max_sds_rings);
        if (ret)
                goto fail_diag_alloc;
 
@@ -1606,13 +1679,19 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
 
        /* Poll for link up event before running traffic */
        do {
-               msleep(500);
+               msleep(QLC_83XX_LB_MSLEEP_COUNT);
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
                        qlcnic_83xx_process_aen(adapter);
 
-               if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-                       dev_info(&adapter->pdev->dev,
-                                "Firmware didn't sent link up event to loopback request\n");
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+                       netdev_info(netdev,
+                                   "Device is resetting, free LB test resources\n");
+                       ret = -EIO;
+                       goto free_diag_res;
+               }
+               if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+                       netdev_info(netdev,
+                                   "Firmware didn't send link up event to loopback request\n");
                        ret = -QLCNIC_FW_NOT_RESPOND;
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
                        goto free_diag_res;
@@ -1634,13 +1713,14 @@ free_diag_res:
 
 fail_diag_alloc:
        adapter->max_sds_rings = max_sds_rings;
-       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       qlcnic_release_diag_lock(adapter);
        return ret;
 }
 
 int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
        int status = 0, loop = 0;
        u32 config;
 
@@ -1658,9 +1738,9 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 
        status = qlcnic_83xx_set_port_config(adapter);
        if (status) {
-               dev_err(&adapter->pdev->dev,
-                       "Failed to Set Loopback Mode = 0x%x.\n",
-                       ahw->port_config);
+               netdev_err(netdev,
+                          "Failed to Set Loopback Mode = 0x%x.\n",
+                          ahw->port_config);
                ahw->port_config = config;
                clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                return status;
@@ -1668,13 +1748,19 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 
        /* Wait for Link and IDC Completion AEN */
        do {
-               msleep(300);
+               msleep(QLC_83XX_LB_MSLEEP_COUNT);
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
                        qlcnic_83xx_process_aen(adapter);
 
-               if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-                       dev_err(&adapter->pdev->dev,
-                               "FW did not generate IDC completion AEN\n");
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+                       netdev_info(netdev,
+                                   "Device is resetting, free LB test resources\n");
+                       clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       return -EIO;
+               }
+               if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+                       netdev_err(netdev,
+                                  "Did not receive IDC completion AEN\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
                        return -EIO;
@@ -1689,6 +1775,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
        int status = 0, loop = 0;
        u32 config = ahw->port_config;
 
@@ -1700,9 +1787,9 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 
        status = qlcnic_83xx_set_port_config(adapter);
        if (status) {
-               dev_err(&adapter->pdev->dev,
-                       "Failed to Clear Loopback Mode = 0x%x.\n",
-                       ahw->port_config);
+               netdev_err(netdev,
+                          "Failed to Clear Loopback Mode = 0x%x.\n",
+                          ahw->port_config);
                ahw->port_config = config;
                clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                return status;
@@ -1710,13 +1797,20 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 
        /* Wait for Link and IDC Completion AEN */
        do {
-               msleep(300);
+               msleep(QLC_83XX_LB_MSLEEP_COUNT);
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
                        qlcnic_83xx_process_aen(adapter);
 
-               if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-                       dev_err(&adapter->pdev->dev,
-                               "Firmware didn't sent IDC completion AEN\n");
+               if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+                       netdev_info(netdev,
+                                   "Device is resetting, free LB test resources\n");
+                       clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+                       return -EIO;
+               }
+
+               if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+                       netdev_err(netdev,
+                                  "Did not receive IDC completion AEN\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                        return -EIO;
                }
@@ -1745,7 +1839,11 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
        u32 temp = 0, temp_ip;
        struct qlcnic_cmd_args cmd;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CONFIGURE_IP_ADDR);
+       if (err)
+               return;
+
        qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
 
        if (mode == QLCNIC_IP_UP)
@@ -1784,7 +1882,10 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return 0;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+       if (err)
+               return err;
+
        temp = adapter->recv_ctx->context_id << 16;
        arg1 = lro_bit_mask | temp;
        cmd.req.arg[1] = arg1;
@@ -1806,8 +1907,9 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
                            0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                            0x255b0ec26d5a56daULL };
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
-
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+       if (err)
+               return err;
        /*
         * RSS request:
         * bits 3-0: Rsvd
@@ -1913,7 +2015,10 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
        struct qlcnic_cmd_args cmd;
        u32 mac_low, mac_high;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+       if (err)
+               return err;
+
        qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
        err = qlcnic_issue_cmd(adapter, &cmd);
 
@@ -1944,7 +2049,10 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+       if (err)
+               return;
+
        if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
                temp = adapter->recv_ctx->context_id;
                cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
@@ -2016,7 +2124,10 @@ int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
                return err;
        }
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = (port & 0xf) | BIT_4;
        err = qlcnic_issue_cmd(adapter, &cmd);
 
@@ -2044,7 +2155,10 @@ int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
                return err;
        }
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = (nic->pci_func << 16);
        cmd.req.arg[2] = 0x1 << 16;
        cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
@@ -2075,13 +2189,17 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
        u32 temp;
        u8 op = 0;
        struct qlcnic_cmd_args cmd;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
-       if (func_id != adapter->ahw->pci_func) {
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+       if (err)
+               return err;
+
+       if (func_id != ahw->pci_func) {
                temp = func_id << 16;
                cmd.req.arg[1] = op | BIT_31 | temp;
        } else {
-               cmd.req.arg[1] = adapter->ahw->pci_func << 16;
+               cmd.req.arg[1] = ahw->pci_func << 16;
        }
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err) {
@@ -2108,6 +2226,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
                temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
                npar_info->max_linkspeed_reg_offset = temp;
        }
+       if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS)
+               memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+                      sizeof(ahw->extra_capability));
 
 out:
        qlcnic_free_mbx_args(&cmd);
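
The hunk above also copies the firmware's extended capability words into ahw->extra_capability, but only when the QLCNIC_FW_CAPABILITY_MORE_CAPS bit is advertised and never more than the destination array holds. A simplified model of that guarded copy, assuming an illustrative bit position and response layout:

#include <stdio.h>
#include <string.h>

#define FW_CAPABILITY_MORE_CAPS	(1u << 31)	/* illustrative bit position */

struct fw_caps {
	unsigned int capabilities;
	unsigned int extra_capability[3];
};

/* Copy the extended capability words out of a response buffer only
 * when the firmware advertises them, bounded by the destination size. */
static void read_caps(struct fw_caps *caps, const unsigned int *rsp)
{
	caps->capabilities = rsp[0];
	if (caps->capabilities & FW_CAPABILITY_MORE_CAPS)
		memcpy(caps->extra_capability, &rsp[1],
		       sizeof(caps->extra_capability));
}

int main(void)
{
	unsigned int rsp[4] = { FW_CAPABILITY_MORE_CAPS, 0x1, 0x2, 0x3 };
	struct fw_caps caps = { 0 };

	read_caps(&caps, rsp);
	printf("extra[0] = %#x\n", caps.extra_capability[0]);
	return 0;
}
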
@@ -2117,26 +2238,28 @@ out:
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
                             struct qlcnic_pci_info *pci_info)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
        int i, err = 0, j = 0;
        u32 temp;
-       struct qlcnic_cmd_args cmd;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+       if (err)
+               return err;
+
        err = qlcnic_issue_cmd(adapter, &cmd);
 
-       adapter->ahw->act_pci_func = 0;
+       ahw->act_pci_func = 0;
        if (err == QLCNIC_RCODE_SUCCESS) {
-               pci_info->func_count = cmd.rsp.arg[1] & 0xFF;
-               dev_info(&adapter->pdev->dev,
-                        "%s: total functions = %d\n",
-                        __func__, pci_info->func_count);
+               ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
                for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
                        pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
                        pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
                        i++;
                        pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
                        if (pci_info->type == QLCNIC_TYPE_NIC)
-                               adapter->ahw->act_pci_func++;
+                               ahw->act_pci_func++;
                        temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
                        pci_info->default_port = temp;
                        i++;
@@ -2148,18 +2271,21 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
                        i++;
                        memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
                        i = i + 3;
-
-                       dev_info(&adapter->pdev->dev, "%s:\n"
-                                "\tid = %d active = %d type = %d\n"
-                                "\tport = %d min bw = %d max bw = %d\n"
-                                "\tmac_addr =  %pM\n", __func__,
-                                pci_info->id, pci_info->active, pci_info->type,
-                                pci_info->default_port, pci_info->tx_min_bw,
-                                pci_info->tx_max_bw, pci_info->mac);
+                       if (ahw->op_mode == QLCNIC_MGMT_FUNC)
+                               dev_info(dev, "id = %d active = %d type = %d\n"
+                                        "\tport = %d min bw = %d max bw = %d\n"
+                                        "\tmac_addr =  %pM\n", pci_info->id,
+                                        pci_info->active, pci_info->type,
+                                        pci_info->default_port,
+                                        pci_info->tx_min_bw,
+                                        pci_info->tx_max_bw, pci_info->mac);
                }
+               if (ahw->op_mode == QLCNIC_MGMT_FUNC)
+                       dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
+                                ahw->max_pci_func, ahw->act_pci_func);
+
        } else {
-               dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n",
-                       err);
+               dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
                err = -EIO;
        }
 
@@ -2176,7 +2302,10 @@ int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
        struct qlcnic_cmd_args cmd;
 
        max_ints = adapter->ahw->num_msix - 1;
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = max_ints;
 
        if (qlcnic_sriov_vf_check(adapter))
@@ -2804,7 +2933,11 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
                dev_info(&adapter->pdev->dev, "link state down\n");
                return config;
        }
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+       if (err)
+               return err;
+
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err) {
                dev_info(&adapter->pdev->dev,
@@ -2830,6 +2963,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
                        break;
                }
                config = cmd.rsp.arg[3];
+               if (QLC_83XX_SFP_PRESENT(config)) {
+                       switch (ahw->module_type) {
+                       case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+                       case LINKEVENT_MODULE_OPTICAL_SRLR:
+                       case LINKEVENT_MODULE_OPTICAL_LRM:
+                       case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+                               ahw->supported_type = PORT_FIBRE;
+                               break;
+                       case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+                       case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+                       case LINKEVENT_MODULE_TWINAX:
+                               ahw->supported_type = PORT_TP;
+                               break;
+                       default:
+                               ahw->supported_type = PORT_OTHER;
+                       }
+               }
                if (config & 1)
                        err = 1;
        }
@@ -2838,7 +2988,8 @@ out:
        return config;
 }
 
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+                            struct ethtool_cmd *ecmd)
 {
        u32 config = 0;
        int status = 0;
@@ -2851,6 +3002,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
        ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
        /* hard code until there is a way to get it from flash */
        ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+       if (netif_running(adapter->netdev) && ahw->has_link_events) {
+               ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+               ecmd->duplex = ahw->link_duplex;
+               ecmd->autoneg = ahw->link_autoneg;
+       } else {
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->duplex = DUPLEX_UNKNOWN;
+               ecmd->autoneg = AUTONEG_DISABLE;
+       }
+
+       if (ahw->port_type == QLCNIC_XGBE) {
+               ecmd->supported = SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_1000baseT_Full;
+       } else {
+               ecmd->supported = (SUPPORTED_10baseT_Half |
+                                  SUPPORTED_10baseT_Full |
+                                  SUPPORTED_100baseT_Half |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_1000baseT_Half |
+                                  SUPPORTED_1000baseT_Full);
+               ecmd->advertising = (ADVERTISED_100baseT_Half |
+                                    ADVERTISED_100baseT_Full |
+                                    ADVERTISED_1000baseT_Half |
+                                    ADVERTISED_1000baseT_Full);
+       }
+
+       switch (ahw->supported_type) {
+       case PORT_FIBRE:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       case PORT_TP:
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->port = PORT_TP;
+               ecmd->transceiver = XCVR_INTERNAL;
+               break;
+       default:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_OTHER;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       }
+       ecmd->phy_address = ahw->physical_port;
        return status;
 }
 
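
qlcnic_83xx_get_settings() now fills the caller-supplied struct ethtool_cmd directly: speed, duplex and autoneg come from the last link event when the interface is running, otherwise they are reported as unknown, and the SFP module type detected earlier selects the port and transceiver fields. A reduced model of that fallback logic, using local types instead of the real ethtool structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the ethtool fields the driver fills in. */
enum port_kind { PORT_KIND_FIBRE, PORT_KIND_TP, PORT_KIND_OTHER };

struct link_settings {
	unsigned int speed;	/* Mb/s, 0 == unknown */
	enum port_kind port;
	bool autoneg;
};

static void fill_link_settings(struct link_settings *ls, bool link_events,
			       unsigned int fw_speed, enum port_kind module)
{
	/* Report a real speed only when the firmware has sent link
	 * events; otherwise leave it marked unknown. */
	ls->speed = link_events ? fw_speed : 0;
	ls->autoneg = false;
	ls->port = module;	/* optical modules -> fibre, twinax -> TP */
}

int main(void)
{
	struct link_settings ls;

	fill_link_settings(&ls, true, 10000, PORT_KIND_FIBRE);
	printf("speed=%u port=%d\n", ls.speed, ls.port);
	return 0;
}
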
@@ -2964,7 +3163,9 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
        struct net_device *netdev = adapter->netdev;
        int ret = 0;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+       if (ret)
+               return;
        /* Get Tx stats */
        cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
        cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
@@ -3043,15 +3244,20 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
        u8 val;
        int ret, max_sds_rings = adapter->max_sds_rings;
 
-       if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
-               return -EIO;
+       if (qlcnic_get_diag_lock(adapter)) {
+               netdev_info(netdev, "Device in diagnostics mode\n");
+               return -EBUSY;
+       }
 
-       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+                                        max_sds_rings);
        if (ret)
                goto fail_diag_irq;
 
        ahw->diag_cnt = 0;
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+       if (ret)
+               goto fail_diag_irq;
 
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
                intrpt_id = ahw->intr_tbl[0].id;
@@ -3085,7 +3291,7 @@ done:
 
 fail_diag_irq:
        adapter->max_sds_rings = max_sds_rings;
-       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       qlcnic_release_diag_lock(adapter);
        return ret;
 }
 
@@ -3189,3 +3395,54 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
        }
        return 0;
 }
+
+int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int retval;
+
+       netif_device_detach(netdev);
+       qlcnic_cancel_idc_work(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_83xx_disable_mbx_intr(adapter);
+       cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       return 0;
+}
+
+int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       int err = 0;
+
+       err = qlcnic_83xx_idc_init(adapter);
+       if (err)
+               return err;
+
+       if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
+               if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
+                       qlcnic_83xx_set_vnic_opmode(adapter);
+               } else {
+                       err = qlcnic_83xx_check_vnic_state(adapter);
+                       if (err)
+                               return err;
+               }
+       }
+
+       err = qlcnic_83xx_idc_reattach_driver(adapter);
+       if (err)
+               return err;
+
+       qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+                            idc->delay);
+       return err;
+}
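
The new qlcnic_83xx_shutdown() helper tears the device down in a fixed order (detach the net device, cancel the deferred IDC work, bring the interface down, disable the mailbox interrupt, then save PCI state), and only the last step can fail. A trivial user-space model of that ordering, with stub functions standing in for the driver calls:

#include <stdio.h>

/* Illustrative stand-ins for the teardown steps, in the order the
 * driver performs them. */
static void detach_netdev(void)        { puts("detach netdev"); }
static void cancel_deferred_work(void) { puts("cancel idc work"); }
static void stop_interface(void)       { puts("bring interface down"); }
static void disable_mbx_irq(void)      { puts("disable mailbox interrupt"); }
static int  save_pci_state(void)       { puts("save pci state"); return 0; }

static int shutdown_sequence(int running)
{
	detach_netdev();
	cancel_deferred_work();
	if (running)
		stop_interface();
	disable_mbx_irq();
	return save_pci_state();	/* propagate the only fallible step */
}

int main(void)
{
	return shutdown_sequence(1);
}
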
index 1f1d85e6f2afd1e4cb1581f03088333b034726a3..2548d1403d75a7668434d53db5013f3969dd8f78 100644 (file)
@@ -36,7 +36,8 @@
 #define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT         3
 #define QLC_83XX_DRV_LOCK_RECOVERY_DELAY               200
 #define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK         0x3
-
+#define QLC_83XX_LB_WAIT_COUNT                         250
+#define QLC_83XX_LB_MSLEEP_COUNT                       20
 #define QLC_83XX_NO_NIC_RESOURCE       0x5
 #define QLC_83XX_MAC_PRESENT           0xC
 #define QLC_83XX_MAC_ABSENT            0xD
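
Taken together, the two new constants bound the loopback wait loops in qlcnic_83xx_set_lb_mode() and qlcnic_83xx_clear_lb_mode(): roughly QLC_83XX_LB_WAIT_COUNT x QLC_83XX_LB_MSLEEP_COUNT = 250 x 20 ms = 5 seconds of polling (plus scheduling slack) before the driver gives up with -EIO.
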
@@ -314,6 +315,7 @@ struct qlc_83xx_idc {
        u8              vnic_state;
        u8              vnic_wait_limit;
        u8              quiesce_req;
+       u8              delay_reset;
        char            **name;
 };
 
@@ -392,6 +394,8 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_LB_MAX_FILTERS                        2048
 #define QLC_83XX_LB_BUCKET_SIZE                        256
 #define QLC_83XX_MINIMUM_VECTOR                        3
+#define QLC_83XX_MAX_MC_COUNT                  38
+#define QLC_83XX_MAX_UC_COUNT                  4096
 
 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val)     (val & 0x80000000)
 #define QLC_83XX_GET_LRO_CAPABILITY(val)               (val & 0x20)
@@ -603,7 +607,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
                                struct ethtool_pauseparam *);
@@ -620,7 +624,14 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
 u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+int qlcnic_83xx_shutdown(struct pci_dev *);
+int qlcnic_83xx_resume(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
 #endif
index ab1d8d99cbd530d59727a4e131789f3eb210a9bb..f41dfab1e9a35d3fc4b3d72c820b2d5db84f59e0 100644 (file)
@@ -382,8 +382,6 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        dev_err(&adapter->pdev->dev, "%s:\n", __func__);
 
-       adapter->netdev->trans_start = jiffies;
-
        return 0;
 }
 
@@ -435,10 +433,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
        }
 done:
        netif_device_attach(netdev);
-       if (netif_running(netdev)) {
-               netif_carrier_on(netdev);
-               netif_wake_queue(netdev);
-       }
 }
 
 static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
@@ -612,7 +606,7 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 {
        int err;
 
@@ -635,6 +629,7 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
                return -EIO;
        }
 
+       qlcnic_set_drv_version(adapter);
        qlcnic_83xx_idc_attach_driver(adapter);
 
        return 0;
@@ -642,15 +637,22 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 
 static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
        qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
-       clear_bit(__QLCNIC_RESETTING, &adapter->state);
        set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
        set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
-       adapter->ahw->idc.quiesce_req = 0;
-       adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
-       adapter->ahw->idc.err_code = 0;
-       adapter->ahw->idc.collect_dump = 0;
+
+       ahw->idc.quiesce_req = 0;
+       ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+       ahw->idc.err_code = 0;
+       ahw->idc.collect_dump = 0;
+       ahw->reset_context = 0;
+       adapter->tx_timeo_cnt = 0;
+       ahw->idc.delay_reset = 0;
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
 }
 
 /**
@@ -851,6 +853,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        /* Check for soft reset request */
        if (ahw->reset_context &&
            !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+               adapter->ahw->reset_context = 0;
                qlcnic_83xx_idc_tx_soft_reset(adapter);
                return ret;
        }
@@ -882,21 +885,41 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
        int ret = 0;
 
        if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
-               qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
                qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
                set_bit(__QLCNIC_RESETTING, &adapter->state);
                clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
                if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
                        qlcnic_83xx_disable_vnic_mode(adapter, 1);
-               qlcnic_83xx_idc_detach_driver(adapter);
+
+               if (qlcnic_check_diag_status(adapter)) {
+                       dev_info(&adapter->pdev->dev,
+                                "%s: Wait for diag completion\n", __func__);
+                       adapter->ahw->idc.delay_reset = 1;
+                       return 0;
+               } else {
+                       qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+                       qlcnic_83xx_idc_detach_driver(adapter);
+               }
        }
 
-       /* Check ACK from other functions */
-       ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
-       if (ret) {
+       if (qlcnic_check_diag_status(adapter)) {
                dev_info(&adapter->pdev->dev,
-                        "%s: Waiting for reset ACK\n", __func__);
-               return 0;
+                        "%s: Wait for diag completion\n", __func__);
+               return  -1;
+       } else {
+               if (adapter->ahw->idc.delay_reset) {
+                       qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+                       qlcnic_83xx_idc_detach_driver(adapter);
+                       adapter->ahw->idc.delay_reset = 0;
+               }
+
+               /* Check for ACK from other functions */
+               ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+               if (ret) {
+                       dev_info(&adapter->pdev->dev,
+                                "%s: Waiting for reset ACK\n", __func__);
+                       return -1;
+               }
        }
 
        /* Transit to INIT state and restart the HW */
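
The need-reset state now defers tearing the driver down while a diagnostic test still owns the device: the first pass only sets idc.delay_reset, and the ack and detach happen on a later poll once the diagnostic completes. A small model of that two-phase decision (the helper names and return values are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Model of the deferred-reset decision: while diagnostics are in
 * progress the poll only marks the reset as delayed; once they
 * finish, it acks the reset and detaches. */
static bool diag_in_progress;
static bool delay_reset;

static int need_reset_poll(void)
{
	if (diag_in_progress) {
		puts("diag running, defer reset");
		delay_reset = true;
		return -1;		/* stay in NEED_RESET, poll again */
	}

	if (delay_reset) {
		puts("diag done, ack deferred reset");
		delay_reset = false;
	}

	puts("detach driver and restart HW");
	return 0;
}

int main(void)
{
	diag_in_progress = true;
	need_reset_poll();		/* first poll: deferred */
	diag_in_progress = false;
	return need_reset_poll();	/* second poll: proceeds */
}
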
@@ -914,6 +937,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
 static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
        dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
        adapter->ahw->idc.err_code = -EIO;
 
        return 0;
@@ -1111,7 +1135,7 @@ qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
 {
        int ret = -EIO;
 
@@ -1530,9 +1554,18 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
 
 int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
 {
-       u8 *p_buff;
-       u32 addr, count;
        struct qlcnic_hardware_context *ahw = p_dev->ahw;
+       u32 addr, count, prev_ver, curr_ver;
+       u8 *p_buff;
+
+       if (ahw->reset.buff != NULL) {
+               prev_ver = p_dev->fw_version;
+               curr_ver = qlcnic_83xx_get_fw_version(p_dev);
+               if (curr_ver > prev_ver)
+                       kfree(ahw->reset.buff);
+               else
+                       return 0;
+       }
 
        ahw->reset.seq_error = 0;
        ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
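
qlcnic_83xx_get_reset_instruction_template() now keeps the previously fetched template buffer and only refetches it when the firmware version has moved forward. A simplified model of that cache-or-refetch decision, with fetch_template() standing in for the flash read:

#include <stdio.h>
#include <stdlib.h>

static void *template_buff;
static unsigned int cached_fw_version;

/* Placeholder for reading the reset template out of flash. */
static void *fetch_template(void) { return malloc(64); }

/* Re-use the cached template unless the firmware version increased,
 * in which case drop the stale copy and fetch a fresh one. */
static int get_reset_template(unsigned int curr_fw_version)
{
	if (template_buff) {
		if (curr_fw_version > cached_fw_version) {
			free(template_buff);	/* stale, refetch below */
			template_buff = NULL;
		} else {
			return 0;		/* cached copy is fine */
		}
	}

	template_buff = fetch_template();
	if (!template_buff)
		return -1;
	cached_fw_version = curr_fw_version;
	return 0;
}

int main(void)
{
	int err;

	get_reset_template(1);
	err = get_reset_template(2);	/* newer firmware: refetch */
	free(template_buff);
	return err ? 1 : 0;
}
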
@@ -2060,7 +2093,11 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
        audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
 
        if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
-               qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+               status = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                              QLCNIC_CMD_STOP_NIC_FUNC);
+               if (status)
+                       return;
+
                cmd.req.arg[1] = BIT_31;
                status = qlcnic_issue_cmd(adapter, &cmd);
                if (status)
index b0c3de9ede03657ea0b868ddc6c98203b16aac4d..599d1fda52f28a3209a5ae6c4aff4e1c4358abb6 100644 (file)
@@ -39,30 +39,21 @@ int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
        return 0;
 }
 
-static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
 {
        u8 id;
-       int i, ret = -EBUSY;
+       int ret = -EBUSY;
        u32 data = QLCNIC_MGMT_FUNC;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 
        if (qlcnic_83xx_lock_driver(adapter))
                return ret;
 
-       if (qlcnic_config_npars) {
-               for (i = 0; i < ahw->act_pci_func; i++) {
-                       id = adapter->npars[i].pci_func;
-                       if (id == ahw->pci_func)
-                               continue;
-                       data |= qlcnic_config_npars &
-                               QLC_83XX_SET_FUNC_OPMODE(0x3, id);
-               }
-       } else {
-               data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
-               data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) |
-                      QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC,
-                                               ahw->pci_func);
-       }
+       id = ahw->pci_func;
+       data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+       data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) |
+              QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id);
+
        QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
 
        qlcnic_83xx_unlock_driver(adapter);
@@ -196,20 +187,24 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
        else
                priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
                                                         ahw->pci_func);
-
-       if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+       switch (priv_level) {
+       case QLCNIC_NON_PRIV_FUNC:
                ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
                ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
                nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
-       } else if (priv_level == QLCNIC_PRIV_FUNC) {
+               break;
+       case QLCNIC_PRIV_FUNC:
                ahw->op_mode = QLCNIC_PRIV_FUNC;
                ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
                nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
-       } else if (priv_level == QLCNIC_MGMT_FUNC) {
+               break;
+       case QLCNIC_MGMT_FUNC:
                ahw->op_mode = QLCNIC_MGMT_FUNC;
                ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
                nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
-       } else {
+               break;
+       default:
+               dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
                return -EIO;
        }
 
@@ -218,8 +213,29 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
        else
                adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
 
-       adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
-       adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+       ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+       ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+       return 0;
+}
+
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlc_83xx_idc *idc = &ahw->idc;
+       u32 state;
+
+       state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+       while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+               msleep(1000);
+               state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+       }
+
+       if (!idc->vnic_wait_limit) {
+               dev_err(&adapter->pdev->dev,
+                       "vNIC mode not operational, state check timed out.\n");
+               return -EIO;
+       }
 
        return 0;
 }
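
The new qlcnic_83xx_check_vnic_state() helper lets a non-privileged function wait for the management function to bring the vNIC to the operational state, polling roughly once a second until idc->vnic_wait_limit runs out. A stand-alone sketch of that bounded poll (read_vnic_state() is a placeholder for the QLC_83XX_VNIC_STATE register read):

#include <stdio.h>
#include <unistd.h>

enum { NPAR_NON_OPER = 0, NPAR_OPER = 1 };

/* Placeholder for the register read that reports the vNIC state. */
static int read_vnic_state(int polls)
{
	return polls < 2 ? NPAR_NON_OPER : NPAR_OPER;
}

/* Poll roughly once a second until the state goes operational or the
 * wait budget is exhausted. */
static int wait_vnic_oper(int wait_limit)
{
	int polls = 0, state = read_vnic_state(polls);

	while (state != NPAR_OPER && wait_limit--) {
		sleep(1);
		state = read_vnic_state(++polls);
	}

	if (state != NPAR_OPER) {
		fprintf(stderr, "vNIC not operational, timed out\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return wait_vnic_oper(30) ? 1 : 0;
}
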
index 43562c256379539b8f739d4b72950886c067b75e..0d54fceda960434cc29c2e30b5151c8b7f02b6d6 100644 (file)
@@ -36,7 +36,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
        {QLCNIC_CMD_CONFIG_PORT, 4, 1},
        {QLCNIC_CMD_TEMP_SIZE, 4, 4},
        {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
-       {QLCNIC_CMD_SET_DRV_VER, 4, 1},
+       {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
+       {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
 };
 
 static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -181,7 +182,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
        return cmd->rsp.arg[0];
 }
 
-int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
 {
        struct qlcnic_cmd_args cmd;
        u32 arg1, arg2, arg3;
@@ -193,7 +194,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
                 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
                 _QLCNIC_LINUX_SUBVERSION);
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
+       if (err)
+               return err;
+
        memcpy(&arg1, drv_string, sizeof(u32));
        memcpy(&arg2, drv_string + 4, sizeof(u32));
        memcpy(&arg3, drv_string + 8, sizeof(u32));
@@ -221,7 +225,10 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
 
        if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
                return err;
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = recv_ctx->context_id;
        cmd.req.arg[2] = mtu;
 
@@ -335,7 +342,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
        }
 
        phys_addr = hostrq_phys_addr;
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+       if (err)
+               goto out_free_rsp;
+
        cmd.req.arg[1] = MSD(phys_addr);
        cmd.req.arg[2] = LSD(phys_addr);
        cmd.req.arg[3] = rq_size;
@@ -373,10 +383,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
        recv_ctx->context_id = le16_to_cpu(prsp->context_id);
        recv_ctx->virt_port = prsp->virt_port;
 
+       qlcnic_free_mbx_args(&cmd);
 out_free_rsp:
        dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
-               cardrsp_phys_addr);
-       qlcnic_free_mbx_args(&cmd);
+                         cardrsp_phys_addr);
 out_free_rq:
        dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
        return err;
@@ -388,7 +398,10 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
        struct qlcnic_cmd_args cmd;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+       if (err)
+               return;
+
        cmd.req.arg[1] = recv_ctx->context_id;
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err)
@@ -457,7 +470,10 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
 
        phys_addr = rq_phys_addr;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+       if (err)
+               goto out_free_rsp;
+
        cmd.req.arg[1] = MSD(phys_addr);
        cmd.req.arg[2] = LSD(phys_addr);
        cmd.req.arg[3] = rq_size;
@@ -473,12 +489,13 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
                err = -EIO;
        }
 
+       qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
        dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
                          rsp_phys_addr);
-
 out_free_rq:
        dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
-       qlcnic_free_mbx_args(&cmd);
 
        return err;
 }
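
Several hunks in this file also reorder the error unwinding so that qlcnic_free_mbx_args() is reached only when the allocation actually succeeded, with the DMA buffers released under goto labels that earlier failures jump past. The pattern, modelled with plain malloc/free:

#include <stdio.h>
#include <stdlib.h>

/* Model of the reordered unwinding: each label releases only the
 * resources acquired before the failure, so a failed later
 * allocation never frees something that was never allocated. */
static int create_context(void)
{
	void *req_buf, *rsp_buf, *cmd_args;
	int err = -1;

	req_buf = malloc(128);
	if (!req_buf)
		return -1;

	rsp_buf = malloc(128);
	if (!rsp_buf)
		goto out_free_req;

	cmd_args = malloc(64);
	if (!cmd_args)
		goto out_free_rsp;

	puts("issue command");
	err = 0;

	free(cmd_args);			/* freed only when it was allocated */
out_free_rsp:
	free(rsp_buf);
out_free_req:
	free(req_buf);
	return err;
}

int main(void)
{
	return create_context();
}
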
@@ -487,8 +504,11 @@ void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
                                   struct qlcnic_host_tx_ring *tx_ring)
 {
        struct qlcnic_cmd_args cmd;
+       int ret;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+       if (ret)
+               return;
 
        cmd.req.arg[1] = tx_ring->ctx_id;
        if (qlcnic_issue_cmd(adapter, &cmd))
@@ -503,7 +523,10 @@ qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
        int err;
        struct qlcnic_cmd_args cmd;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = config;
        err = qlcnic_issue_cmd(adapter, &cmd);
        qlcnic_free_mbx_args(&cmd);
@@ -707,7 +730,10 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
        struct qlcnic_cmd_args cmd;
        u32 mac_low, mac_high;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
        err = qlcnic_issue_cmd(adapter, &cmd);
 
@@ -746,7 +772,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
 
        nic_info = nic_info_addr;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+       if (err)
+               goto out_free_dma;
+
        cmd.req.arg[1] = MSD(nic_dma_t);
        cmd.req.arg[2] = LSD(nic_dma_t);
        cmd.req.arg[3] = (func_id << 16 | nic_size);
@@ -768,9 +797,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
                npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
        }
 
+       qlcnic_free_mbx_args(&cmd);
+out_free_dma:
        dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
                          nic_dma_t);
-       qlcnic_free_mbx_args(&cmd);
 
        return err;
 }
@@ -807,7 +837,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
        nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
        nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+       if (err)
+               goto out_free_dma;
+
        cmd.req.arg[1] = MSD(nic_dma_t);
        cmd.req.arg[2] = LSD(nic_dma_t);
        cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
@@ -819,9 +852,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
                err = -EIO;
        }
 
-       dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
-               nic_dma_t);
        qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+                         nic_dma_t);
 
        return err;
 }
@@ -845,7 +879,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
                return -ENOMEM;
 
        npar = pci_info_addr;
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+       if (err)
+               goto out_free_dma;
+
        cmd.req.arg[1] = MSD(pci_info_dma_t);
        cmd.req.arg[2] = LSD(pci_info_dma_t);
        cmd.req.arg[3] = pci_size;
@@ -873,20 +910,22 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
                err = -EIO;
        }
 
+       qlcnic_free_mbx_args(&cmd);
+out_free_dma:
        dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
                pci_info_dma_t);
-       qlcnic_free_mbx_args(&cmd);
 
        return err;
 }
 
 /* Configure eSwitch for port mirroring */
 int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
-                               u8 enable_mirroring, u8 pci_func)
+                                u8 enable_mirroring, u8 pci_func)
 {
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
        int err = -EIO;
        u32 arg1;
-       struct qlcnic_cmd_args cmd;
 
        if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
            !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
@@ -895,18 +934,20 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
        arg1 = id | (enable_mirroring ? BIT_4 : 0);
        arg1 |= pci_func << 8;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_SET_PORTMIRRORING);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = arg1;
        err = qlcnic_issue_cmd(adapter, &cmd);
 
        if (err != QLCNIC_RCODE_SUCCESS)
-               dev_err(&adapter->pdev->dev,
-                       "Failed to configure port mirroring%d on eswitch:%d\n",
+               dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
                        pci_func, id);
        else
-               dev_info(&adapter->pdev->dev,
-                       "Configured eSwitch %d for port mirroring:%d\n",
-                       id, pci_func);
+               dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
+                        pci_func, id);
        qlcnic_free_mbx_args(&cmd);
 
        return err;
@@ -941,7 +982,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
        arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
        arg1 |= rx_tx << 15 | stats_size << 16;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_GET_ESWITCH_STATS);
+       if (err)
+               goto out_free_dma;
+
        cmd.req.arg[1] = arg1;
        cmd.req.arg[2] = MSD(stats_dma_t);
        cmd.req.arg[3] = LSD(stats_dma_t);
@@ -963,9 +1008,10 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
                esw_stats->numbytes = le64_to_cpu(stats->numbytes);
        }
 
-       dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
-               stats_dma_t);
        qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+                         stats_dma_t);
 
        return err;
 }
@@ -989,7 +1035,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
        if (!stats_addr)
                return -ENOMEM;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+       if (err)
+               goto out_free_dma;
+
        cmd.req.arg[1] = stats_size << 16;
        cmd.req.arg[2] = MSD(stats_dma_t);
        cmd.req.arg[3] = LSD(stats_dma_t);
@@ -1020,11 +1069,12 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
                        "%s: Get mac stats failed, err=%d.\n", __func__, err);
        }
 
-       dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
-               stats_dma_t);
-
        qlcnic_free_mbx_args(&cmd);
 
+out_free_dma:
+       dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+                         stats_dma_t);
+
        return err;
 }
 
@@ -1108,7 +1158,11 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
        arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
        arg1 |= BIT_14 | rx_tx << 15;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_GET_ESWITCH_STATS);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = arg1;
        err = qlcnic_issue_cmd(adapter, &cmd);
        qlcnic_free_mbx_args(&cmd);
@@ -1121,17 +1175,19 @@ err_ret:
        return -EIO;
 }
 
-static int
-__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
-                                       u32 *arg1, u32 *arg2)
+static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+                                           u32 *arg1, u32 *arg2)
 {
-       int err = -EIO;
+       struct device *dev = &adapter->pdev->dev;
        struct qlcnic_cmd_args cmd;
-       u8 pci_func;
-       pci_func = (*arg1 >> 8);
+       u8 pci_func = *arg1 >> 8;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+       if (err)
+               return err;
 
-       qlcnic_alloc_mbx_args(&cmd, adapter,
-                             QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
        cmd.req.arg[1] = *arg1;
        err = qlcnic_issue_cmd(adapter, &cmd);
        *arg1 = cmd.rsp.arg[1];
@@ -1139,12 +1195,11 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
        qlcnic_free_mbx_args(&cmd);
 
        if (err == QLCNIC_RCODE_SUCCESS)
-               dev_info(&adapter->pdev->dev,
-                        "eSwitch port config for pci func %d\n", pci_func);
+               dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
+                        pci_func);
        else
-               dev_err(&adapter->pdev->dev,
-                       "Failed to get eswitch port config for pci func %d\n",
-                                                               pci_func);
+               dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
+                       pci_func);
        return err;
 }
 /* Configure eSwitch port
@@ -1157,9 +1212,10 @@ op_type = 1 for port vlan_id
 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
                struct qlcnic_esw_func_cfg *esw_cfg)
 {
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_cmd_args cmd;
        int err = -EIO, index;
        u32 arg1, arg2 = 0;
-       struct qlcnic_cmd_args cmd;
        u8 pci_func;
 
        if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
@@ -1209,18 +1265,22 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
                return err;
        }
 
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_CONFIGURE_ESWITCH);
+       if (err)
+               return err;
+
        cmd.req.arg[1] = arg1;
        cmd.req.arg[2] = arg2;
        err = qlcnic_issue_cmd(adapter, &cmd);
        qlcnic_free_mbx_args(&cmd);
 
        if (err != QLCNIC_RCODE_SUCCESS)
-               dev_err(&adapter->pdev->dev,
-                       "Failed to configure eswitch pci func %d\n", pci_func);
+               dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
+                       pci_func);
        else
-               dev_info(&adapter->pdev->dev,
-                        "Configured eSwitch for pci func %d\n", pci_func);
+               dev_info(dev, "Configured eSwitch for vNIC function %d\n",
+                        pci_func);
 
        return err;
 }
index 08efb463500723a268923617157b4864a4345878..700a46324d09230b8d8ba6e6fc820bc0e56b9526 100644 (file)
@@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
        "ctx_lro_pkt_cnt",
        "ctx_ip_csum_error",
        "ctx_rx_pkts_wo_ctx",
-       "ctx_rx_pkts_dropped_wo_sts",
+       "ctx_rx_pkts_drop_wo_sds_on_card",
+       "ctx_rx_pkts_drop_wo_sds_on_host",
        "ctx_rx_osized_pkts",
        "ctx_rx_pkts_dropped_wo_rds",
        "ctx_rx_unexpected_mcast_pkts",
        "ctx_invalid_mac_address",
-       "ctx_rx_rds_ring_prim_attemoted",
+       "ctx_rx_rds_ring_prim_attempted",
        "ctx_rx_rds_ring_prim_success",
        "ctx_num_lro_flows_added",
        "ctx_num_lro_flows_removed",
@@ -251,6 +252,18 @@ static int
 qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       if (qlcnic_82xx_check(adapter))
+               return qlcnic_82xx_get_settings(adapter, ecmd);
+       else if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_get_settings(adapter, ecmd);
+
+       return -EIO;
+}
+
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+                            struct ethtool_cmd *ecmd)
+{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 speed, reg;
        int check_sfp_module = 0;
@@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 
        } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
                u32 val = 0;
-               if (qlcnic_83xx_check(adapter))
-                       qlcnic_83xx_get_settings(adapter);
-               else
-                       val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+               val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
 
                if (val == QLCNIC_PORT_MODE_802_3_AP) {
                        ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        ecmd->advertising = ADVERTISED_10000baseT_Full;
                }
 
-               if (netif_running(dev) && adapter->ahw->has_link_events) {
-                       if (qlcnic_82xx_check(adapter)) {
-                               reg = QLCRD32(adapter,
-                                             P3P_LINK_SPEED_REG(pcifn));
-                               speed = P3P_LINK_SPEED_VAL(pcifn, reg);
-                               ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
-                       }
-                       ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
-                       ecmd->autoneg = adapter->ahw->link_autoneg;
-                       ecmd->duplex = adapter->ahw->link_duplex;
+               if (netif_running(adapter->netdev) && ahw->has_link_events) {
+                       reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+                       speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+                       ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+                       ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+                       ecmd->autoneg = ahw->link_autoneg;
+                       ecmd->duplex = ahw->link_duplex;
                        goto skip;
                }
 
@@ -340,8 +347,8 @@ skip:
        case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
                ecmd->advertising |= ADVERTISED_TP;
                ecmd->supported |= SUPPORTED_TP;
-               check_sfp_module = netif_running(dev) &&
-                                  adapter->ahw->has_link_events;
+               check_sfp_module = netif_running(adapter->netdev) &&
+                                  ahw->has_link_events;
        case QLCNIC_BRDTYPE_P3P_10G_XFP:
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising |= ADVERTISED_FIBRE;
@@ -355,8 +362,8 @@ skip:
                        ecmd->advertising |=
                                (ADVERTISED_FIBRE | ADVERTISED_TP);
                        ecmd->port = PORT_FIBRE;
-                       check_sfp_module = netif_running(dev) &&
-                                          adapter->ahw->has_link_events;
+                       check_sfp_module = netif_running(adapter->netdev) &&
+                                          ahw->has_link_events;
                } else {
                        ecmd->autoneg = AUTONEG_ENABLE;
                        ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -365,13 +372,6 @@ skip:
                        ecmd->port = PORT_TP;
                }
                break;
-       case QLCNIC_BRDTYPE_83XX_10G:
-               ecmd->autoneg = AUTONEG_DISABLE;
-               ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
-               ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
-               ecmd->port = PORT_FIBRE;
-               check_sfp_module = netif_running(dev) && ahw->has_link_events;
-               break;
        default:
                dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
                        adapter->ahw->board_type);
@@ -846,7 +846,9 @@ static int qlcnic_irq_test(struct net_device *netdev)
                goto clear_diag_irq;
 
        ahw->diag_cnt = 0;
-       qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+       if (ret)
+               goto free_diag_res;
 
        cmd.req.arg[1] = ahw->pci_func;
        ret = qlcnic_issue_cmd(adapter, &cmd);
@@ -858,6 +860,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
 
 done:
        qlcnic_free_mbx_args(&cmd);
+
+free_diag_res:
        qlcnic_diag_free_res(netdev, max_sds_rings);
 
 clear_diag_irq:
index c0f0c0d0a7908822461cb74953d58a682cd85098..d262211b03b3748f6d398badb1edf6bed54e1531 100644 (file)
@@ -672,6 +672,7 @@ enum {
 #define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT     10
 
 #define QLCNIC_MAX_MC_COUNT            38
+#define QLCNIC_MAX_UC_COUNT            512
 #define QLCNIC_WATCHDOG_TIMEOUTVALUE   5
 
 #define        ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
index 6a6512ba9f38824f0d72a844aaa0d96a95415a8f..5b5d2edf125d9f93920b13d83fbfa802c96140fc 100644 (file)
@@ -499,6 +499,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
 void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct netdev_hw_addr *ha;
        static const u8 bcast_addr[ETH_ALEN] = {
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff
@@ -515,25 +516,30 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
        if (netdev->flags & IFF_PROMISC) {
                if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
                        mode = VPORT_MISS_MODE_ACCEPT_ALL;
-               goto send_fw_cmd;
-       }
-
-       if ((netdev->flags & IFF_ALLMULTI) ||
-           (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) {
-               mode = VPORT_MISS_MODE_ACCEPT_MULTI;
-               goto send_fw_cmd;
+       } else if (netdev->flags & IFF_ALLMULTI) {
+               if (netdev_mc_count(netdev) > ahw->max_mc_count) {
+                       mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+               } else if (!netdev_mc_empty(netdev) &&
+                          !qlcnic_sriov_vf_check(adapter)) {
+                               netdev_for_each_mc_addr(ha, netdev)
+                                       qlcnic_nic_add_mac(adapter, ha->addr,
+                                                          vlan);
+               }
+               if (mode != VPORT_MISS_MODE_ACCEPT_MULTI &&
+                   qlcnic_sriov_vf_check(adapter))
+                       qlcnic_vf_add_mc_list(netdev, vlan);
        }
 
-       if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) {
-               netdev_for_each_mc_addr(ha, netdev) {
+       /* configure unicast MAC address, if there is not sufficient space
+        * to store all the unicast addresses then enable promiscuous mode
+        */
+       if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if (!netdev_uc_empty(netdev)) {
+               netdev_for_each_uc_addr(ha, netdev)
                        qlcnic_nic_add_mac(adapter, ha->addr, vlan);
-               }
        }
 
-       if (qlcnic_sriov_vf_check(adapter))
-               qlcnic_vf_add_mc_list(netdev, vlan);
-
-send_fw_cmd:
        if (!qlcnic_sriov_vf_check(adapter)) {
                if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
                    !adapter->fdb_mac_learn) {
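
__qlcnic_set_multi() now programs unicast addresses as well: if the unicast list exceeds ahw->max_uc_count the driver falls back to promiscuous (accept-all) mode, and an oversized multicast list still degrades to accept-multi. A reduced model of that mode selection, using the 82xx limits added in this patch (38 multicast, 512 unicast) as example values:

#include <stdio.h>

enum rx_mode { RX_NORMAL, RX_ACCEPT_MULTI, RX_ACCEPT_ALL };

/* Pick the miss mode from the list sizes: overflowing the multicast
 * table degrades to accept-multi, overflowing the unicast table
 * degrades all the way to accept-all (promiscuous). */
static enum rx_mode pick_rx_mode(int mc_count, int uc_count,
				 int max_mc, int max_uc)
{
	enum rx_mode mode = RX_NORMAL;

	if (mc_count > max_mc)
		mode = RX_ACCEPT_MULTI;
	if (uc_count > max_uc)
		mode = RX_ACCEPT_ALL;
	return mode;
}

int main(void)
{
	printf("%d\n", pick_rx_mode(50, 10, 38, 512));	/* accept-multi */
	printf("%d\n", pick_rx_mode(10, 600, 38, 512));	/* accept-all */
	return 0;
}
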
@@ -780,7 +786,8 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
        word = 0;
        if (enable) {
                word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
-               if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6)
+               if (adapter->ahw->extra_capability[0] &
+                   QLCNIC_FW_CAP2_HW_LRO_IPV6)
                        word |= QLCNIC_ENABLE_IPV6_LRO |
                                QLCNIC_NO_DEST_IPV6_CHECK;
        }
@@ -973,16 +980,57 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
        return rc;
 }
 
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+                                             netdev_features_t features)
+{
+       u32 offload_flags = adapter->offload_flags;
+
+       if (offload_flags & BIT_0) {
+               features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+                           NETIF_F_IPV6_CSUM;
+               adapter->rx_csum = 1;
+               if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+                       if (!(offload_flags & BIT_1))
+                               features &= ~NETIF_F_TSO;
+                       else
+                               features |= NETIF_F_TSO;
+
+                       if (!(offload_flags & BIT_2))
+                               features &= ~NETIF_F_TSO6;
+                       else
+                               features |= NETIF_F_TSO6;
+               }
+       } else {
+               features &= ~(NETIF_F_RXCSUM |
+                             NETIF_F_IP_CSUM |
+                             NETIF_F_IPV6_CSUM);
+
+               if (QLCNIC_IS_TSO_CAPABLE(adapter))
+                       features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+               adapter->rx_csum = 0;
+       }
+
+       return features;
+}
 
 netdev_features_t qlcnic_fix_features(struct net_device *netdev,
        netdev_features_t features)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       netdev_features_t changed;
 
-       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
-           qlcnic_82xx_check(adapter)) {
-               netdev_features_t changed = features ^ netdev->features;
-               features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+       if (qlcnic_82xx_check(adapter) &&
+           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+               if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+                       features = qlcnic_process_flags(adapter, features);
+               } else {
+                       changed = features ^ netdev->features;
+                       features ^= changed & (NETIF_F_RXCSUM |
+                                              NETIF_F_IP_CSUM |
+                                              NETIF_F_IPV6_CSUM |
+                                              NETIF_F_TSO |
+                                              NETIF_F_TSO6);
+               }
        }
 
        if (!(features & NETIF_F_RXCSUM))
@@ -1462,6 +1510,21 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
        return rv;
 }
 
+int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS);
+       if (!err) {
+               err = qlcnic_issue_cmd(adapter, &cmd);
+               if (!err)
+                       *h_state = cmd.rsp.arg[1];
+       }
+       qlcnic_free_mbx_args(&cmd);
+       return err;
+}
+
 void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
 {
        void __iomem *msix_base_addr;
@@ -1514,3 +1577,54 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
 {
        qlcnic_pcie_sem_unlock(adapter, 5);
 }
+
+int qlcnic_82xx_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int retval;
+
+       netif_device_detach(netdev);
+
+       qlcnic_cancel_idc_work(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_clr_all_drv_state(adapter, 0);
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       if (qlcnic_wol_supported(adapter)) {
+               pci_enable_wake(pdev, PCI_D3cold, 1);
+               pci_enable_wake(pdev, PCI_D3hot, 1);
+       }
+
+       return 0;
+}
+
+int qlcnic_82xx_resume(struct qlcnic_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err;
+
+       err = qlcnic_start_firmware(adapter);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "failed to start firmware\n");
+               return err;
+       }
+
+       if (netif_running(netdev)) {
+               err = qlcnic_up(adapter, netdev);
+               if (!err)
+                       qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+       }
+
+       netif_device_attach(netdev);
+       qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+       return err;
+}
index 95b1b57328385dac93c0268fb6bae262ae20a793..2c22504f57aac090b5cfecf9ec819f876b2ef6ed 100644 (file)
@@ -86,7 +86,8 @@ enum qlcnic_regs {
 #define QLCNIC_CMD_BC_EVENT_SETUP              0x31
 #define        QLCNIC_CMD_CONFIG_VPORT                 0x32
 #define QLCNIC_CMD_GET_MAC_STATS               0x37
-#define QLCNIC_CMD_SET_DRV_VER                 0x38
+#define QLCNIC_CMD_82XX_SET_DRV_VER            0x38
+#define QLCNIC_CMD_GET_LED_STATUS              0x3C
 #define QLCNIC_CMD_CONFIGURE_RSS               0x41
 #define QLCNIC_CMD_CONFIG_INTR_COAL            0x43
 #define QLCNIC_CMD_CONFIGURE_LED               0x44
@@ -102,6 +103,7 @@ enum qlcnic_regs {
 #define QLCNIC_CMD_GET_LINK_STATUS             0x68
 #define QLCNIC_CMD_SET_LED_CONFIG              0x69
 #define QLCNIC_CMD_GET_LED_CONFIG              0x6A
+#define QLCNIC_CMD_83XX_SET_DRV_VER            0x6F
 #define QLCNIC_CMD_ADD_RCV_RINGS               0x0B
 
 #define QLCNIC_INTRPT_INTX                     1
@@ -134,7 +136,7 @@ struct qlcnic_mailbox_metadata {
 
 #define QLCNIC_SET_OWNER        1
 #define QLCNIC_CLR_OWNER        0
-#define QLCNIC_MBX_TIMEOUT      10000
+#define QLCNIC_MBX_TIMEOUT      5000
 
 #define QLCNIC_MBX_RSP_OK      1
 #define QLCNIC_MBX_PORT_RSP_OK 0x1a
@@ -197,4 +199,8 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
 void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
 void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
 void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+int qlcnic_82xx_shutdown(struct pci_dev *);
+int qlcnic_82xx_resume(struct qlcnic_adapter *);
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
+void qlcnic_fw_poll_work(struct work_struct *work);
 #endif                         /* __QLCNIC_HW_H_ */
index 264d5a4f81538a23653c370a7d85df18617c09be..4528f8ec333bb50d01c116958cb1a86c7392ef7c 100644 (file)
@@ -37,25 +37,21 @@ MODULE_PARM_DESC(qlcnic_mac_learn,
                 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
 
 int qlcnic_use_msi = 1;
-MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
 module_param_named(use_msi, qlcnic_use_msi, int, 0444);
 
 int qlcnic_use_msi_x = 1;
-MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
 module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
 
 int qlcnic_auto_fw_reset = 1;
-MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
 module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
 
 int qlcnic_load_fw_file;
-MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
 module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
 
-int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0444);
-MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
-
 static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void qlcnic_remove(struct pci_dev *pdev);
 static int qlcnic_open(struct net_device *netdev);
@@ -63,13 +59,11 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-static void qlcnic_fw_poll_work(struct work_struct *work);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev);
 #endif
 
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
-static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
 
 static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
@@ -84,14 +78,9 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
-static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
-                               struct qlcnic_esw_func_cfg *);
 static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
 static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
 
-#define QLCNIC_IS_TSO_CAPABLE(adapter) \
-       ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
-
 static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,6 +297,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
        return 0;
 }
 
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mac_list_s *cur;
+       struct list_head *head;
+
+       list_for_each(head, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_list_s, list);
+               if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+                       qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+                                                 0, QLCNIC_MAC_DEL);
+                       list_del(&cur->list);
+                       kfree(cur);
+                       return;
+               }
+       }
+}
+
 static int qlcnic_set_mac(struct net_device *netdev, void *p)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -322,11 +328,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;
 
+       if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+               return 0;
+
        if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                netif_device_detach(netdev);
                qlcnic_napi_disable(adapter);
        }
 
+       qlcnic_delete_adapter_mac(adapter);
        memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        qlcnic_set_multi(adapter->netdev);
@@ -348,12 +358,15 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
 
        if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
-               if (is_unicast_ether_addr(addr))
-                       err = qlcnic_nic_del_mac(adapter, addr);
-               else if (is_multicast_ether_addr(addr))
+               if (is_unicast_ether_addr(addr)) {
+                       err = dev_uc_del(netdev, addr);
+                       if (!err)
+                               err = qlcnic_nic_del_mac(adapter, addr);
+               } else if (is_multicast_ether_addr(addr)) {
                        err = dev_mc_del(netdev, addr);
-               else
+               } else {
                        err =  -EINVAL;
+               }
        }
        return err;
 }
@@ -376,12 +389,16 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        if (ether_addr_equal(addr, adapter->mac_addr))
                return err;
 
-       if (is_unicast_ether_addr(addr))
-               err = qlcnic_nic_add_mac(adapter, addr, 0);
-       else if (is_multicast_ether_addr(addr))
+       if (is_unicast_ether_addr(addr)) {
+               if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count)
+                       err = dev_uc_add_excl(netdev, addr);
+               else
+                       err = -ENOMEM;
+       } else if (is_multicast_ether_addr(addr)) {
                err = dev_mc_add_excl(netdev, addr);
-       else
+       } else {
                err = -EINVAL;
+       }
 
        return err;
 }
@@ -433,6 +450,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_set_vf_tx_rate     = qlcnic_sriov_set_vf_tx_rate,
        .ndo_get_vf_config      = qlcnic_sriov_get_vf_config,
        .ndo_set_vf_vlan        = qlcnic_sriov_set_vf_vlan,
+       .ndo_set_vf_spoofchk    = qlcnic_sriov_set_vf_spoofchk,
 #endif
 };
 
@@ -449,6 +467,8 @@ static struct qlcnic_nic_template qlcnic_ops = {
        .napi_add               = qlcnic_82xx_napi_add,
        .napi_del               = qlcnic_82xx_napi_del,
        .config_ipaddr          = qlcnic_82xx_config_ipaddr,
+       .shutdown               = qlcnic_82xx_shutdown,
+       .resume                 = qlcnic_82xx_resume,
        .clear_legacy_intr      = qlcnic_82xx_clear_legacy_intr,
 };
 
@@ -492,6 +512,7 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
        .config_promisc_mode            = qlcnic_82xx_nic_set_promisc,
        .change_l2_filter               = qlcnic_82xx_change_filter,
        .get_board_info                 = qlcnic_82xx_get_board_info,
+       .set_mac_filter_count           = qlcnic_82xx_set_mac_filter_count,
        .free_mac_list                  = qlcnic_82xx_free_mac_list,
 };
 
@@ -752,7 +773,7 @@ static int
 qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
 {
        u8 id;
-       int i, ret = 1;
+       int ret;
        u32 data = QLCNIC_MGMT_FUNC;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 
@@ -760,20 +781,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
        if (ret)
                goto err_lock;
 
-       if (qlcnic_config_npars) {
-               for (i = 0; i < ahw->act_pci_func; i++) {
-                       id = adapter->npars[i].pci_func;
-                       if (id == ahw->pci_func)
-                               continue;
-                       data |= (qlcnic_config_npars &
-                                       QLC_DEV_SET_DRV(0xf, id));
-               }
-       } else {
-               data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
-               data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
-                       (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
-                                        ahw->pci_func));
-       }
+       id = ahw->pci_func;
+       data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+       data = (data & ~QLC_DEV_SET_DRV(0xf, id)) |
+              QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id);
        QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
        qlcnic_api_unlock(adapter);
 err_lock:
@@ -859,6 +870,27 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
        return 0;
 }
 
+static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+                                               int index)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       unsigned short subsystem_vendor;
+       bool ret = true;
+
+       subsystem_vendor = pdev->subsystem_vendor;
+
+       if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+               if (qlcnic_boards[index].sub_vendor == subsystem_vendor &&
+                   qlcnic_boards[index].sub_device == pdev->subsystem_device)
+                       ret = true;
+               else
+                       ret = false;
+       }
+
+       return ret;
+}
+
 static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
 {
        struct pci_dev *pdev = adapter->pdev;
@@ -866,20 +898,18 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
 
        for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
                if (qlcnic_boards[i].vendor == pdev->vendor &&
-                       qlcnic_boards[i].device == pdev->device &&
-                       qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
-                       qlcnic_boards[i].sub_device == pdev->subsystem_device) {
-                               sprintf(name, "%pM: %s" ,
-                                       adapter->mac_addr,
-                                       qlcnic_boards[i].short_name);
-                               found = 1;
-                               break;
+                   qlcnic_boards[i].device == pdev->device &&
+                   qlcnic_validate_subsystem_id(adapter, i)) {
+                       found = 1;
+                       break;
                }
-
        }
 
        if (!found)
                sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
+       else
+               sprintf(name, "%pM: %s", adapter->mac_addr,
+                       qlcnic_boards[i].short_name);
 }
 
 static void
@@ -964,7 +994,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
        if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
                u32 temp;
                temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
-               adapter->ahw->capabilities2 = temp;
+               adapter->ahw->extra_capability[0] = temp;
        }
        adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
        adapter->ahw->max_mtu = nic_info.max_mtu;
@@ -1053,8 +1083,6 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
 
        if (!esw_cfg->promisc_mode)
                adapter->flags |= QLCNIC_PROMISC_DISABLED;
-
-       qlcnic_set_netdev_features(adapter, esw_cfg);
 }
 
 int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
@@ -1069,51 +1097,23 @@ int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
                        return -EIO;
        qlcnic_set_vlan_config(adapter, &esw_cfg);
        qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+       qlcnic_set_netdev_features(adapter, &esw_cfg);
 
        return 0;
 }
 
-static void
-qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
-               struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+                               struct qlcnic_esw_func_cfg *esw_cfg)
 {
        struct net_device *netdev = adapter->netdev;
-       unsigned long features, vlan_features;
 
        if (qlcnic_83xx_check(adapter))
                return;
 
-       features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                   NETIF_F_IPV6_CSUM | NETIF_F_GRO);
-       vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
-                       NETIF_F_IPV6_CSUM);
-
-       if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
-               features |= (NETIF_F_TSO | NETIF_F_TSO6);
-               vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
-       }
-
-       if (netdev->features & NETIF_F_LRO)
-               features |= NETIF_F_LRO;
-
-       if (esw_cfg->offload_flags & BIT_0) {
-               netdev->features |= features;
-               adapter->rx_csum = 1;
-               if (!(esw_cfg->offload_flags & BIT_1)) {
-                       netdev->features &= ~NETIF_F_TSO;
-                       features &= ~NETIF_F_TSO;
-               }
-               if (!(esw_cfg->offload_flags & BIT_2)) {
-                       netdev->features &= ~NETIF_F_TSO6;
-                       features &= ~NETIF_F_TSO6;
-               }
-       } else {
-               netdev->features &= ~features;
-               features &= ~features;
-               adapter->rx_csum = 0;
-       }
-
-       netdev->vlan_features = (features & vlan_features);
+       adapter->offload_flags = esw_cfg->offload_flags;
+       adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+       netdev_update_features(netdev);
+       adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
 }
 
 static int
@@ -1409,16 +1409,23 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
                        for (ring = 0; ring < num_sds_rings; ring++) {
                                sds_ring = &recv_ctx->sds_rings[ring];
                                if (qlcnic_82xx_check(adapter) &&
-                                   (ring == (num_sds_rings - 1)))
-                                       snprintf(sds_ring->name,
-                                                sizeof(sds_ring->name),
-                                                "qlcnic-%s[Tx0+Rx%d]",
-                                                netdev->name, ring);
-                               else
+                                   (ring == (num_sds_rings - 1))) {
+                                       if (!(adapter->flags &
+                                             QLCNIC_MSIX_ENABLED))
+                                               snprintf(sds_ring->name,
+                                                        sizeof(sds_ring->name),
+                                                        "qlcnic");
+                                       else
+                                               snprintf(sds_ring->name,
+                                                        sizeof(sds_ring->name),
+                                                        "%s-tx-0-rx-%d",
+                                                        netdev->name, ring);
+                               } else {
                                        snprintf(sds_ring->name,
                                                 sizeof(sds_ring->name),
-                                                "qlcnic-%s[Rx%d]",
+                                                "%s-rx-%d",
                                                 netdev->name, ring);
+                               }
                                err = request_irq(sds_ring->irq, handler, flags,
                                                  sds_ring->name, sds_ring);
                                if (err)
@@ -1433,7 +1440,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
                             ring++) {
                                tx_ring = &adapter->tx_ring[ring];
                                snprintf(tx_ring->name, sizeof(tx_ring->name),
-                                        "qlcnic-%s[Tx%d]", netdev->name, ring);
+                                        "%s-tx-%d", netdev->name, ring);
                                err = request_irq(tx_ring->irq, handler, flags,
                                                  tx_ring->name, tx_ring);
                                if (err)
@@ -1479,7 +1486,7 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
        u32 capab = 0;
 
        if (qlcnic_82xx_check(adapter)) {
-               if (adapter->ahw->capabilities2 &
+               if (adapter->ahw->extra_capability[0] &
                    QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
                        adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
        } else {
@@ -1830,6 +1837,22 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
        return err;
 }
 
+void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u16 act_pci_fn = ahw->act_pci_func;
+       u16 count;
+
+       ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
+       if (act_pci_fn <= 2)
+               count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) /
+                        act_pci_fn;
+       else
+               count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) /
+                        act_pci_fn;
+       ahw->max_uc_count = count;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -1839,7 +1862,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 
        adapter->rx_csum = 1;
        adapter->ahw->mc_enabled = 0;
-       adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
+       qlcnic_set_mac_filter_count(adapter);
 
        netdev->netdev_ops         = &qlcnic_netdev_ops;
        netdev->watchdog_timeo     = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
@@ -1877,6 +1900,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                netdev->features |= NETIF_F_LRO;
 
        netdev->hw_features = netdev->features;
+       netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
        err = register_netdev(netdev);
@@ -1961,6 +1985,21 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
        return 0;
 }
 
+void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 fw_cmd = 0;
+
+       if (qlcnic_82xx_check(adapter))
+               fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER;
+       else if (qlcnic_83xx_check(adapter))
+               fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
+
+       if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) &&
+           (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER))
+               qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
+}
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -1968,7 +2007,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct qlcnic_adapter *adapter = NULL;
        struct qlcnic_hardware_context *ahw;
        int err, pci_using_dac = -1;
-       u32 capab2;
        char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
 
        if (pdev->is_virtfn)
@@ -1995,8 +2033,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_enable_pcie_error_reporting(pdev);
 
        ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
-       if (!ahw)
+       if (!ahw) {
+               err = -ENOMEM;
                goto err_out_free_res;
+       }
 
        switch (ent->device) {
        case PCI_DEVICE_ID_QLOGIC_QLE824X:
@@ -2032,6 +2072,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
        if (adapter->qlcnic_wq == NULL) {
+               err = -ENOMEM;
                dev_err(&pdev->dev, "Failed to create workqueue\n");
                goto err_out_free_netdev;
        }
@@ -2112,17 +2153,15 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        goto err_out_disable_msi;
        }
 
+       err = qlcnic_get_act_pci_func(adapter);
+       if (err)
+               goto err_out_disable_mbx_intr;
+
        err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
        if (err)
                goto err_out_disable_mbx_intr;
 
-       if (qlcnic_82xx_check(adapter)) {
-               if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
-                       capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
-                       if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB)
-                               qlcnic_fw_cmd_set_drv_version(adapter);
-               }
-       }
+       qlcnic_set_drv_version(adapter);
 
        pci_set_drvdata(pdev, adapter);
 
@@ -2141,9 +2180,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                break;
        }
 
-       if (qlcnic_get_act_pci_func(adapter))
-               goto err_out_disable_mbx_intr;
-
        if (adapter->drv_mac_learn)
                qlcnic_alloc_lb_filters_mem(adapter);
 
@@ -2241,37 +2277,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
        kfree(ahw);
        free_netdev(netdev);
 }
-static int __qlcnic_shutdown(struct pci_dev *pdev)
-{
-       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev = adapter->netdev;
-       int retval;
-
-       netif_device_detach(netdev);
-
-       qlcnic_cancel_idc_work(adapter);
-
-       if (netif_running(netdev))
-               qlcnic_down(adapter, netdev);
-
-       qlcnic_sriov_cleanup(adapter);
-       if (qlcnic_82xx_check(adapter))
-               qlcnic_clr_all_drv_state(adapter, 0);
-
-       clear_bit(__QLCNIC_RESETTING, &adapter->state);
-
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-       if (qlcnic_82xx_check(adapter)) {
-               if (qlcnic_wol_supported(adapter)) {
-                       pci_enable_wake(pdev, PCI_D3cold, 1);
-                       pci_enable_wake(pdev, PCI_D3hot, 1);
-               }
-       }
-
-       return 0;
-}
 
 static void qlcnic_shutdown(struct pci_dev *pdev)
 {
@@ -2282,8 +2287,7 @@ static void qlcnic_shutdown(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PM
-static int
-qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        int retval;
 
@@ -2295,11 +2299,9 @@ qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int
-qlcnic_resume(struct pci_dev *pdev)
+static int qlcnic_resume(struct pci_dev *pdev)
 {
        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
-       struct net_device *netdev = adapter->netdev;
        int err;
 
        err = pci_enable_device(pdev);
@@ -2310,23 +2312,7 @@ qlcnic_resume(struct pci_dev *pdev)
        pci_set_master(pdev);
        pci_restore_state(pdev);
 
-       err = qlcnic_start_firmware(adapter);
-       if (err) {
-               dev_err(&pdev->dev, "failed to start firmware\n");
-               return err;
-       }
-
-       if (netif_running(netdev)) {
-               err = qlcnic_up(adapter, netdev);
-               if (err)
-                       goto done;
-
-               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
-       }
-done:
-       netif_device_attach(netdev);
-       qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
-       return 0;
+       return __qlcnic_resume(adapter);
 }
 #endif
 
@@ -2481,12 +2467,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return;
 
-       dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-
-       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
-               adapter->need_fw_reset = 1;
-       else
+       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
+               netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+               if (qlcnic_82xx_check(adapter))
+                       adapter->need_fw_reset = 1;
+               else if (qlcnic_83xx_check(adapter))
+                       qlcnic_83xx_idc_request_reset(adapter,
+                                                     QLCNIC_FORCE_FW_DUMP_KEY);
+       } else {
+               netdev_info(netdev, "Tx timeout, reset adapter context.\n");
                adapter->ahw->reset_context = 1;
+       }
 }
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -2660,8 +2651,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-static void
-qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
 {
        u32  val;
 
@@ -3091,6 +3081,7 @@ done:
        adapter->fw_fail_cnt = 0;
        adapter->flags &= ~QLCNIC_FW_HANG;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
+       qlcnic_set_drv_version(adapter);
 
        if (!qlcnic_clr_drv_state(adapter))
                qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -3123,10 +3114,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
                if (adapter->need_fw_reset)
                        goto detach;
 
-               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
+               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
                        qlcnic_reset_hw_context(adapter);
-                       adapter->netdev->trans_start = jiffies;
-               }
 
                return 0;
        }
@@ -3173,8 +3162,7 @@ detach:
        return 1;
 }
 
-static void
-qlcnic_fw_poll_work(struct work_struct *work)
+void qlcnic_fw_poll_work(struct work_struct *work)
 {
        struct qlcnic_adapter *adapter = container_of(work,
                                struct qlcnic_adapter, fw_work.work);
@@ -3226,7 +3214,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
        if (err)
                return err;
 
-       pci_set_power_state(pdev, PCI_D0);
        pci_set_master(pdev);
        pci_restore_state(pdev);
 
@@ -3524,7 +3511,7 @@ static int qlcnic_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
 {
        struct qlcnic_adapter *adapter;
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
 recheck:
        if (dev == NULL)
index 4b9bab18ebd9b637cbc101e9e7e663cf9d62d709..ab8a6744d402f43e794007db16ee218916051675 100644 (file)
@@ -15,6 +15,7 @@
 #define QLC_83XX_MINIDUMP_FLASH                0x520000
 #define QLC_83XX_OCM_INDEX                     3
 #define QLC_83XX_PCI_INDEX                     0
+#define QLC_83XX_DMA_ENGINE_INDEX              8
 
 static const u32 qlcnic_ms_read_data[] = {
        0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
@@ -32,6 +33,16 @@ static const u32 qlcnic_ms_read_data[] = {
 
 #define QLCNIC_DUMP_MASK_MAX   0xff
 
+struct qlcnic_pex_dma_descriptor {
+       u32     read_data_size;
+       u32     dma_desc_cmd;
+       u32     src_addr_low;
+       u32     src_addr_high;
+       u32     dma_bus_addr_low;
+       u32     dma_bus_addr_high;
+       u32     rsvd[6];
+} __packed;
+
 struct qlcnic_common_entry_hdr {
        u32     type;
        u32     offset;
@@ -90,7 +101,10 @@ struct __ocm {
 } __packed;
 
 struct __mem {
-       u8      rsvd[24];
+       u32     desc_card_addr;
+       u32     dma_desc_cmd;
+       u32     start_dma_cmd;
+       u32     rsvd[3];
        u32     addr;
        u32     size;
 } __packed;
@@ -466,12 +480,12 @@ skip_poll:
        return l2->no_ops * l2->read_addr_num * sizeof(u32);
 }
 
-static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
-                             struct qlcnic_dump_entry *entry, __le32 *buffer)
+static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
+                                        struct __mem *mem, __le32 *buffer,
+                                        int *ret)
 {
-       u32 addr, data, test, ret = 0;
+       u32 addr, data, test;
        int i, reg_read;
-       struct __mem *mem = &entry->region.mem;
 
        reg_read = mem->size;
        addr = mem->addr;
@@ -480,7 +494,8 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
                dev_info(&adapter->pdev->dev,
                         "Unaligned memory addr:0x%x size:0x%x\n",
                         addr, reg_read);
-               return -EINVAL;
+               *ret = -EINVAL;
+               return 0;
        }
 
        mutex_lock(&adapter->ahw->mem_lock);
@@ -499,7 +514,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
                        if (printk_ratelimit()) {
                                dev_err(&adapter->pdev->dev,
                                        "failed to read through agent\n");
-                               ret = -EINVAL;
+                               *ret = -EIO;
                                goto out;
                        }
                }
@@ -516,6 +531,181 @@ out:
        return mem->size;
 }
 
+/* DMA register base address */
+#define QLC_DMA_REG_BASE_ADDR(dma_no)  (0x77320000 + (dma_no * 0x10000))
+
+/* DMA register offsets w.r.t base address */
+#define QLC_DMA_CMD_BUFF_ADDR_LOW      0
+#define QLC_DMA_CMD_BUFF_ADDR_HI       4
+#define QLC_DMA_CMD_STATUS_CTRL                8
+
+#define QLC_PEX_DMA_READ_SIZE          (PAGE_SIZE * 16)
+
+static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
+                               struct __mem *mem)
+{
+       struct qlcnic_dump_template_hdr *tmpl_hdr;
+       struct device *dev = &adapter->pdev->dev;
+       u32 dma_no, dma_base_addr, temp_addr;
+       int i, ret, dma_sts;
+
+       tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+       dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+       dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
+
+       temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
+       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
+                                          mem->desc_card_addr);
+       if (ret)
+               return ret;
+
+       temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
+       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+       if (ret)
+               return ret;
+
+       temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
+                                          mem->start_dma_cmd);
+       if (ret)
+               return ret;
+
+       /* Wait for DMA to complete */
+       temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+       for (i = 0; i < 400; i++) {
+               dma_sts = qlcnic_ind_rd(adapter, temp_addr);
+
+               if (dma_sts & BIT_1)
+                       usleep_range(250, 500);
+               else
+                       break;
+       }
+
+       if (i >= 400) {
+               dev_info(dev, "PEX DMA operation timed out");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
+                                    struct __mem *mem,
+                                    __le32 *buffer, int *ret)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       u32 temp, dma_base_addr, size = 0, read_size = 0;
+       struct qlcnic_pex_dma_descriptor *dma_descr;
+       struct qlcnic_dump_template_hdr *tmpl_hdr;
+       struct device *dev = &adapter->pdev->dev;
+       dma_addr_t dma_phys_addr;
+       void *dma_buffer;
+
+       tmpl_hdr = fw_dump->tmpl_hdr;
+
+       /* Check if DMA engine is available */
+       temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+       dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
+       temp = qlcnic_ind_rd(adapter,
+                            dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
+
+       if (!(temp & BIT_31)) {
+               dev_info(dev, "%s: DMA engine is not available\n", __func__);
+               *ret = -EIO;
+               return 0;
+       }
+
+       /* Create DMA descriptor */
+       dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
+                           GFP_KERNEL);
+       if (!dma_descr) {
+               *ret = -ENOMEM;
+               return 0;
+       }
+
+       /* dma_desc_cmd  0:15  = 0
+        * dma_desc_cmd 16:19  = mem->dma_desc_cmd 0:3
+        * dma_desc_cmd 20:23  = pci function number
+        * dma_desc_cmd 24:31  = mem->dma_desc_cmd 8:15
+        */
+       dma_phys_addr = fw_dump->phys_addr;
+       dma_buffer = fw_dump->dma_buffer;
+       temp = 0;
+       temp = mem->dma_desc_cmd & 0xff0f;
+       temp |= (adapter->ahw->pci_func & 0xf) << 4;
+       dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
+       dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
+       dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
+       dma_descr->src_addr_high = 0;
+
+       /* Collect memory dump using multiple DMA operations if required */
+       while (read_size < mem->size) {
+               if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
+                       size = QLC_PEX_DMA_READ_SIZE;
+               else
+                       size = mem->size - read_size;
+
+               dma_descr->src_addr_low = mem->addr + read_size;
+               dma_descr->read_data_size = size;
+
+               /* Write DMA descriptor to MS memory*/
+               temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
+               *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
+                                                  (u32 *)dma_descr, temp);
+               if (*ret) {
+                       dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
+                                mem->desc_card_addr);
+                       goto free_dma_descr;
+               }
+
+               *ret = qlcnic_start_pex_dma(adapter, mem);
+               if (*ret) {
+                       dev_info(dev, "Failed to start PEX DMA operation\n");
+                       goto free_dma_descr;
+               }
+
+               memcpy(buffer, dma_buffer, size);
+               buffer += size / 4;
+               read_size += size;
+       }
+
+free_dma_descr:
+       kfree(dma_descr);
+
+       return read_size;
+}
+
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+                             struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       struct device *dev = &adapter->pdev->dev;
+       struct __mem *mem = &entry->region.mem;
+       u32 data_size;
+       int ret = 0;
+
+       if (fw_dump->use_pex_dma) {
+               data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
+                                                     &ret);
+               if (ret)
+                       dev_info(dev,
+                                "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
+                                entry->hdr.mask);
+               else
+                       return data_size;
+       }
+
+       data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
+       if (ret) {
+               dev_info(dev,
+                        "Failed to read memory dump using test agent method: mask[0x%x]\n",
+                        entry->hdr.mask);
+               return 0;
+       } else {
+               return data_size;
+       }
+}
+
 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
                           struct qlcnic_dump_entry *entry, __le32 *buffer)
 {
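
To make the descriptor-command packing in qlcnic_read_memory_pexdma() above concrete, a worked example with made-up inputs (illustrative only, not values taken from the dump template or hardware documentation):

	/*
	 *   mem->dma_desc_cmd = 0xA50C, ahw->pci_func = 3
	 *
	 *   temp  = 0xA50C & 0xff0f                    = 0xA50C  (keep bits 0:3 and 8:15)
	 *   temp |= (3 & 0xf) << 4                     = 0xA53C  (PCI function into bits 4:7)
	 *   dma_desc_cmd = (0xA53C << 16) & 0xffff0000 = 0xA53C0000
	 *
	 * The low 16 bits stay zero and the packed command occupies bits 16:31,
	 * matching the bit-layout comment in the code.
	 */
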
@@ -893,6 +1083,12 @@ flash_temp:
 
        tmpl_hdr = ahw->fw_dump.tmpl_hdr;
        tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+
+       if ((tmpl_hdr->version & 0xffffff) >= 0x20001)
+               ahw->fw_dump.use_pex_dma = true;
+       else
+               ahw->fw_dump.use_pex_dma = false;
+
        ahw->fw_dump.enable = 1;
 
        return 0;
@@ -910,7 +1106,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
        struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
        static const struct qlcnic_dump_operations *fw_dump_ops;
+       struct device *dev = &adapter->pdev->dev;
        struct qlcnic_hardware_context *ahw;
+       void *temp_buffer;
 
        ahw = adapter->ahw;
 
@@ -944,6 +1142,16 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
        tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
        tmpl_hdr->sys_info[1] = adapter->fw_version;
 
+       if (fw_dump->use_pex_dma) {
+               temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
+                                                &fw_dump->phys_addr,
+                                                GFP_KERNEL);
+               if (!temp_buffer)
+                       fw_dump->use_pex_dma = false;
+               else
+                       fw_dump->dma_buffer = temp_buffer;
+       }
+
        if (qlcnic_82xx_check(adapter)) {
                ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
                fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1002,6 +1210,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                return 0;
        }
 error:
+       if (fw_dump->use_pex_dma)
+               dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
+                                 fw_dump->dma_buffer, fw_dump->phys_addr);
        vfree(fw_dump->data);
        return -EINVAL;
 }
index d85fbb57c25b1abc879668422158578e71eb515a..0daf660e12a1faa4a48adeacf5e371619ea18d0e 100644 (file)
@@ -129,6 +129,7 @@ struct qlcnic_vport {
        u8                      vlan_mode;
        u16                     vlan;
        u8                      qos;
+       bool                    spoofchk;
        u8                      mac[6];
 };
 
@@ -194,6 +195,8 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
                                   struct qlcnic_info *, u16);
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
 
 static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
 {
@@ -225,6 +228,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
 int qlcnic_sriov_get_vf_config(struct net_device *, int ,
                               struct ifla_vf_info *);
 int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
 #else
 static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
 static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
index 44d547d78b84927db40201cfa03d6aea49bf37cd..d55624993b04ba61782c6f4b4c657bdd17101672 100644 (file)
@@ -35,6 +35,7 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
                                  struct qlcnic_cmd_args *);
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
 
 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .read_crb                       = qlcnic_83xx_read_crb,
@@ -75,6 +76,8 @@ static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
        .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
        .napi_add               = qlcnic_83xx_napi_add,
        .napi_del               = qlcnic_83xx_napi_del,
+       .shutdown               = qlcnic_sriov_vf_shutdown,
+       .resume                 = qlcnic_sriov_vf_resume,
        .config_ipaddr          = qlcnic_83xx_config_ipaddr,
        .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
 };
@@ -179,6 +182,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                spin_lock_init(&vf->rcv_pend.lock);
                init_completion(&vf->ch_free_cmpl);
 
+               INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
+
                if (qlcnic_sriov_pf_check(adapter)) {
                        vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
                        if (!vp) {
@@ -187,6 +192,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                        }
                        sriov->vf_info[i].vp = vp;
                        vp->max_tx_bw = MAX_BW;
+                       vp->spoofchk = true;
                        random_ether_addr(vp->mac);
                        dev_info(&adapter->pdev->dev,
                                 "MAC Address %pM is configured for VF %d\n",
@@ -280,9 +286,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
                                    u32 *pay, u8 pci_func, u8 size)
 {
+       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        unsigned long flags;
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
        u16 opcode;
        u8 mbx_err_code;
        int i, j;
@@ -330,15 +336,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
         * assume something is wrong.
         */
 poll:
-       rsp = qlcnic_83xx_mbx_poll(adapter);
+       rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
        if (rsp != QLCNIC_RCODE_TIMEOUT) {
                /* Get the FW response data */
                fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
                if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
-                       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-                       if (mbx_val)
-                               goto poll;
+                       goto poll;
                }
                mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
                rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -654,6 +658,8 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
        if (qlcnic_read_mac_addr(adapter))
                dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
 
+       INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return 0;
 }
@@ -866,7 +872,6 @@ static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
            vf->adapter->need_fw_reset)
                return;
 
-       INIT_WORK(&vf->trans_work, func);
        queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
 }
 
@@ -1736,7 +1741,6 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
 
        if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
                qlcnic_sriov_vf_attach(adapter);
-               adapter->netdev->trans_start = jiffies;
                adapter->tx_timeo_cnt = 0;
                adapter->reset_ctx_cnt = 0;
                adapter->fw_fail_cnt = 0;
@@ -1952,3 +1956,54 @@ static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
                kfree(cur);
        }
 }
+
+int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+{
+       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       int retval;
+
+       netif_device_detach(netdev);
+       qlcnic_cancel_idc_work(adapter);
+
+       if (netif_running(netdev))
+               qlcnic_down(adapter, netdev);
+
+       qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+       qlcnic_sriov_cfg_bc_intr(adapter, 0);
+       qlcnic_83xx_disable_mbx_intr(adapter);
+       cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+       return 0;
+}
+
+int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+{
+       struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+       struct net_device *netdev = adapter->netdev;
+       int err;
+
+       set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+       qlcnic_83xx_enable_mbx_intrpt(adapter);
+       err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+       if (err)
+               return err;
+
+       err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+       if (!err) {
+               if (netif_running(netdev)) {
+                       err = qlcnic_up(adapter, netdev);
+                       if (!err)
+                               qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+               }
+       }
+
+       netif_device_attach(netdev);
+       qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+                            idc->delay);
+       return err;
+}
index c81be2da119bdc4b9aef1d3f770f3d76d4074b56..ee0c1d307966d842d824c3ed64449690946b7d76 100644 (file)
@@ -580,6 +580,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
        struct qlcnic_cmd_args cmd;
        struct qlcnic_vport *vp;
        int err, id;
+       u8 *mac;
 
        id = qlcnic_sriov_func_to_index(adapter, func);
        if (id < 0)
@@ -591,6 +592,14 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
                return err;
 
        cmd.req.arg[1] = 0x3 | func << 16;
+       if (vp->spoofchk == true) {
+               mac = vp->mac;
+               cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
+               cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
+                                mac[2] << 24;
+               cmd.req.arg[5] = mac[1] | mac[0] << 8;
+       }
+
        if (vp->vlan_mode == QLC_PVID_MODE) {
                cmd.req.arg[2] |= BIT_6;
                cmd.req.arg[3] |= vp->vlan << 8;
@@ -1133,9 +1142,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
        if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
                return -EINVAL;
 
-       if (!(cmd->req.arg[1] & BIT_8))
-               return -EINVAL;
-
        return 0;
 }
 
@@ -1770,6 +1776,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
        memcpy(&ivi->mac, vp->mac, ETH_ALEN);
        ivi->vlan = vp->vlan;
        ivi->qos = vp->qos;
+       ivi->spoofchk = vp->spoofchk;
        if (vp->max_tx_bw == MAX_BW)
                ivi->tx_rate = 0;
        else
@@ -1778,3 +1785,29 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
        ivi->vf = vf;
        return 0;
 }
+
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf_info;
+       struct qlcnic_vport *vp;
+
+       if (!qlcnic_sriov_pf_check(adapter))
+               return -EOPNOTSUPP;
+
+       if (vf >= sriov->num_vfs)
+               return -EINVAL;
+
+       vf_info = &sriov->vf_info[vf];
+       vp = vf_info->vp;
+       if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+               netdev_err(netdev,
+                          "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+                          vf);
+               return -EOPNOTSUPP;
+       }
+
+       vp->spoofchk = chk;
+       return 0;
+}
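
Userspace reaches qlcnic_sriov_set_vf_spoofchk() through the standard rtnetlink VF path (for example "ip link set <pf-dev> vf 0 spoofchk on"), and the request is refused while the VF driver is attached, as the added error message explains. The setting itself is only pushed to firmware when the VF's ACL is programmed in qlcnic_sriov_set_vf_acl(), where the per-VF MAC is packed into the mailbox arguments. A worked example of that packing with an illustrative address:

	/*
	 *   vp->mac = 00:0e:1e:aa:bb:cc
	 *
	 *   cmd.req.arg[4] = cc | bb<<8 | aa<<16 | 1e<<24 = 0x1eaabbcc
	 *   cmd.req.arg[5] = 0e | 00<<8                   = 0x0000000e
	 *
	 * The last four octets land in arg[4] (lowest octet in the lowest byte)
	 * and the first two in the low 16 bits of arg[5].
	 */
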
index 4e22e794a1863ea1abec985d91abe154afcf8b49..10ed82b3baca0970f65ce9797a18fc35cf76b1e3 100644 (file)
@@ -47,7 +47,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev,
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
                goto err_out;
 
-       if (strict_strtoul(buf, 2, &new))
+       if (kstrtoul(buf, 2, &new))
                goto err_out;
 
        if (!qlcnic_config_bridged_mode(adapter, !!new))
@@ -77,7 +77,7 @@ static ssize_t qlcnic_store_diag_mode(struct device *dev,
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        unsigned long new;
 
-       if (strict_strtoul(buf, 2, &new))
+       if (kstrtoul(buf, 2, &new))
                return -EINVAL;
 
        if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
@@ -114,57 +114,51 @@ static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
        return 0;
 }
 
-static ssize_t qlcnic_store_beacon(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t len)
+static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
+                                   const char *buf, size_t len)
 {
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_hardware_context *ahw = adapter->ahw;
-       int err, max_sds_rings = adapter->max_sds_rings;
-       u16 beacon;
-       u8 b_state, b_rate;
        unsigned long h_beacon;
+       int err;
 
-       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
-               dev_warn(dev,
-                        "LED test not supported in non privileged mode\n");
-               return -EOPNOTSUPP;
-       }
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+               return -EIO;
 
-       if (qlcnic_83xx_check(adapter) &&
-           !test_bit(__QLCNIC_RESETTING, &adapter->state)) {
-               if (kstrtoul(buf, 2, &h_beacon))
-                       return -EINVAL;
+       if (kstrtoul(buf, 2, &h_beacon))
+               return -EINVAL;
 
-               if (ahw->beacon_state == h_beacon)
-                       return len;
+       if (ahw->beacon_state == h_beacon)
+               return len;
 
-               rtnl_lock();
-               if (!ahw->beacon_state) {
-                       if (test_and_set_bit(__QLCNIC_LED_ENABLE,
-                                            &adapter->state)) {
-                               rtnl_unlock();
-                               return -EBUSY;
-                       }
-               }
-               if (h_beacon) {
-                       err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
-                       if (err)
-                               goto beacon_err;
-               } else {
-                       err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
-                       if (err)
-                               goto beacon_err;
+       rtnl_lock();
+       if (!ahw->beacon_state) {
+               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+                       rtnl_unlock();
+                       return -EBUSY;
                }
-               /* set the current beacon state */
+       }
+
+       if (h_beacon)
+               err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+       else
+               err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+       if (!err)
                ahw->beacon_state = h_beacon;
-beacon_err:
-               if (!ahw->beacon_state)
-                       clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
 
-               rtnl_unlock();
-               return len;
-       }
+       if (!ahw->beacon_state)
+               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+       rtnl_unlock();
+       return len;
+}
+
+static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
+                                   const char *buf, size_t len)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err, max_sds_rings = adapter->max_sds_rings;
+       u16 beacon;
+       u8 h_beacon_state, b_state, b_rate;
 
        if (len != sizeof(u16))
                return QL_STATUS_INVALID_PARAM;
@@ -174,16 +168,29 @@ beacon_err:
        if (err)
                return err;
 
-       if (adapter->ahw->beacon_state == b_state)
+       if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+               err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
+               if (err) {
+                       dev_info(&adapter->pdev->dev,
+                                "Failed to get current beacon state\n");
+               } else {
+                       if (h_beacon_state == QLCNIC_BEACON_DISABLE)
+                               ahw->beacon_state = 0;
+                       else if (h_beacon_state == QLCNIC_BEACON_EANBLE)
+                               ahw->beacon_state = 2;
+               }
+       }
+
+       if (ahw->beacon_state == b_state)
                return len;
 
        rtnl_lock();
-
-       if (!adapter->ahw->beacon_state)
+       if (!ahw->beacon_state) {
                if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
                        rtnl_unlock();
                        return -EBUSY;
                }
+       }
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                err = -EIO;
@@ -206,14 +213,37 @@ beacon_err:
        if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
                qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
 
- out:
-       if (!adapter->ahw->beacon_state)
+out:
+       if (!ahw->beacon_state)
                clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
        rtnl_unlock();
 
        return err;
 }
 
+static ssize_t qlcnic_store_beacon(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int err = 0;
+
+       if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               dev_warn(dev,
+                        "LED test not supported in non privileged mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (qlcnic_82xx_check(adapter))
+               err = qlcnic_82xx_store_beacon(adapter, buf, len);
+       else if (qlcnic_83xx_check(adapter))
+               err = qlcnic_83xx_store_beacon(adapter, buf, len);
+       else
+               return -EIO;
+
+       return err;
+}
+
 static ssize_t qlcnic_show_beacon(struct device *dev,
                                  struct device_attribute *attr, char *buf)
 {
@@ -544,6 +574,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
                switch (esw_cfg[i].op_mode) {
                case QLCNIC_PORT_DEFAULTS:
                        qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+                       rtnl_lock();
+                       qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+                       rtnl_unlock();
                        break;
                case QLCNIC_ADD_VLAN:
                        qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
index 87463bc701a653781371feb05599aad2b45d224a..2553cf4503b9f83996bb72bd291fb756558644f9 100644 (file)
@@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                        __free_pages(rx_ring->pg_chunk.page,
                                        qdev->lbq_buf_order);
+                       rx_ring->pg_chunk.page = NULL;
                        netif_err(qdev, drv, qdev->ndev,
                                  "PCI mapping failed.\n");
                        return -ENOMEM;
@@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
                        curr_idx = 0;
 
        }
+       if (rx_ring->pg_chunk.page) {
+               pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+                       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+               put_page(rx_ring->pg_chunk.page);
+               rx_ring->pg_chunk.page = NULL;
+       }
 }
 
 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
@@ -4710,6 +4717,7 @@ static int qlge_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev, "net device registration failed.\n");
                ql_release_all(pdev);
                pci_disable_device(pdev);
+               free_netdev(ndev);
                return err;
        }
        /* Start up the timer to trigger EEH if
@@ -4938,15 +4946,4 @@ static struct pci_driver qlge_driver = {
        .err_handler = &qlge_err_handler
 };
 
-static int __init qlge_init_module(void)
-{
-       return pci_register_driver(&qlge_driver);
-}
-
-static void __exit qlge_exit(void)
-{
-       pci_unregister_driver(&qlge_driver);
-}
-
-module_init(qlge_init_module);
-module_exit(qlge_exit);
+module_pci_driver(qlge_driver);
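The module_pci_driver() helper added above (defined in include/linux/pci.h on top of module_driver()) generates exactly the kind of registration boilerplate this hunk deletes. A rough sketch of what the macro expands to for this driver; the function names follow the macro's name-pasting and are shown only for illustration:

static int __init qlge_driver_init(void)
{
	/* register the PCI driver at module load */
	return pci_register_driver(&qlge_driver);
}
module_init(qlge_driver_init);

static void __exit qlge_driver_exit(void)
{
	/* unregister it again at module unload */
	pci_unregister_driver(&qlge_driver);
}
module_exit(qlge_driver_exit);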
index c8ba4b3494c17181e507af32cfcd722c1da28c68..2055f7eb2ba94f6855a37bf8a613dd77e75a757e 100644 (file)
@@ -22,7 +22,6 @@ config R6040
        tristate "RDC R6040 Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        select PHYLIB
        ---help---
index 7d1fb9ad1296e5825da2d845129ef38b51728cd7..03523459c4061627d1fad51f751082298eec2147 100644 (file)
@@ -1136,6 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
                        cp->dev->stats.tx_dropped++;
                }
        }
+       netdev_reset_queue(cp->dev);
 
        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
index 783fa8b5cde77fba13538764f0af3466b1e7f368..ae5d027096ed6f39e6005490b417bf56d44dd565 100644 (file)
@@ -37,7 +37,6 @@ config 8139CP
        tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This is a driver for the Fast Ethernet PCI network cards based on
@@ -52,7 +51,6 @@ config 8139TOO
        tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This is a driver for the Fast Ethernet PCI network cards based on
@@ -107,7 +105,6 @@ config R8169
        depends on PCI
        select FW_LOADER
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
index 79c520b64fddd00ab36436215abbfa5847aa8336..393f961a013cec14daf9c624021fdd60be23939d 100644 (file)
@@ -5856,7 +5856,20 @@ err_out:
        return -EIO;
 }
 
-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+static bool rtl_skb_pad(struct sk_buff *skb)
+{
+       if (skb_padto(skb, ETH_ZLEN))
+               return false;
+       skb_put(skb, ETH_ZLEN - skb->len);
+       return true;
+}
+
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+{
+       return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+}
+
+static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
                                    struct sk_buff *skb, u32 *opts)
 {
        const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
@@ -5869,13 +5882,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
 
+               if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+                       return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+
                if (ip->protocol == IPPROTO_TCP)
                        opts[offset] |= info->checksum.tcp;
                else if (ip->protocol == IPPROTO_UDP)
                        opts[offset] |= info->checksum.udp;
                else
                        WARN_ON_ONCE(1);
+       } else {
+               if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+                       return rtl_skb_pad(skb);
        }
+       return true;
 }
 
 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -5896,17 +5916,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
                goto err_stop_0;
        }
 
-       /* 8168evl does not automatically pad to minimum length. */
-       if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
-                    skb->len < ETH_ZLEN)) {
-               if (skb_padto(skb, ETH_ZLEN))
-                       goto err_update_stats;
-               skb_put(skb, ETH_ZLEN - skb->len);
-       }
-
        if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
                goto err_stop_0;
 
+       opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
+       opts[0] = DescOwn;
+
+       if (!rtl8169_tso_csum(tp, skb, opts))
+               goto err_update_stats;
+
        len = skb_headlen(skb);
        mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(d, mapping))) {
@@ -5918,11 +5936,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        tp->tx_skb[entry].len = len;
        txd->addr = cpu_to_le64(mapping);
 
-       opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
-       opts[0] = DescOwn;
-
-       rtl8169_tso_csum(tp, skb, opts);
-
        frags = rtl8169_xmit_frags(tp, skb, opts);
        if (frags < 0)
                goto err_dma_1;
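The padding helper introduced above relies on skb_padto() semantics that are easy to miss: on failure the skb has already been freed, which is why rtl_skb_pad() returns false without any cleanup of its own. A minimal sketch of the pad-to-minimum-frame idiom (it mirrors the helper above rather than adding new driver behaviour):

	/* skb_padto() zero-fills the tail up to ETH_ZLEN bytes and frees the
	 * skb on allocation failure; the caller must not touch the skb again
	 * once skb_padto() has returned non-zero.
	 */
	if (skb_padto(skb, ETH_ZLEN))
		return false;			/* skb already freed */
	skb_put(skb, ETH_ZLEN - skb->len);	/* account for the padding */
	return true;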
index bed9841d728cb87e65851bef803c02ebac1beb64..544514e66187e3300320114c7e35826fbc8dab67 100644 (file)
@@ -4,14 +4,7 @@
 
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
-       depends on (SUPERH || ARCH_SHMOBILE) && \
-               (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
-                CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
-                CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
-                CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || \
-                ARCH_R8A7778 || ARCH_R8A7779)
        select CRC32
-       select NET_CORE
        select MII
        select MDIO_BITBANG
        select PHYLIB
index 33dc6f2418f2968e23a383e2c0395ed41d832132..7732f11f14ad466f335a54289b2872be0651c176 100644 (file)
@@ -313,9 +313,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_ADRL31]    = 0x01fc,
 };
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
-       defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-       defined(CONFIG_ARCH_R8A7740)
+static int sh_eth_is_gether(struct sh_eth_private *mdp)
+{
+       if (mdp->reg_offset == sh_eth_offset_gigabit)
+               return 1;
+       else
+               return 0;
+}
+
 static void sh_eth_select_mii(struct net_device *ndev)
 {
        u32 value = 0x0;
@@ -339,11 +344,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
 
        sh_eth_write(ndev, value, RMII_MII);
 }
-#endif
 
-/* There is CPU dependent code */
-#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
-#define SH_ETH_RESET_DEFAULT   1
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -354,7 +355,8 @@ static void sh_eth_set_duplex(struct net_device *ndev)
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
 }
 
-static void sh_eth_set_rate(struct net_device *ndev)
+/* There is CPU dependent code */
+static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
@@ -371,9 +373,9 @@ static void sh_eth_set_rate(struct net_device *ndev)
 }
 
 /* R8A7778/9 */
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+static struct sh_eth_cpu_data r8a777x_data = {
        .set_duplex     = sh_eth_set_duplex,
-       .set_rate       = sh_eth_set_rate,
+       .set_rate       = sh_eth_set_rate_r8a777x,
 
        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
@@ -382,26 +384,14 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
                          EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
-       .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
 };
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-#define SH_ETH_RESET_DEFAULT   1
-static void sh_eth_set_duplex(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
-}
 
-static void sh_eth_set_rate(struct net_device *ndev)
+static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
@@ -418,18 +408,17 @@ static void sh_eth_set_rate(struct net_device *ndev)
 }
 
 /* SH7724 */
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+static struct sh_eth_cpu_data sh7724_data = {
        .set_duplex     = sh_eth_set_duplex,
-       .set_rate       = sh_eth_set_rate,
+       .set_rate       = sh_eth_set_rate_sh7724,
 
        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
+       .eesipr_value   = 0x01ff009f,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
                          EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
-       .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
        .apr            = 1,
        .mpr            = 1,
@@ -438,22 +427,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .rpadir         = 1,
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
 };
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-#define SH_ETH_HAS_BOTH_MODULES        1
-#define SH_ETH_HAS_TSU 1
-static int sh_eth_check_reset(struct net_device *ndev);
 
-static void sh_eth_set_duplex(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
-}
-
-static void sh_eth_set_rate(struct net_device *ndev)
+static void sh_eth_set_rate_sh7757(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
@@ -470,9 +445,9 @@ static void sh_eth_set_rate(struct net_device *ndev)
 }
 
 /* SH7757 */
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
-       .set_duplex             = sh_eth_set_duplex,
-       .set_rate               = sh_eth_set_rate,
+static struct sh_eth_cpu_data sh7757_data = {
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_sh7757,
 
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .rmcr_value     = 0x00000001,
@@ -480,8 +455,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
                          EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
-       .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
+       .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
@@ -491,7 +466,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .rpadir_value   = 2 << 16,
 };
 
-#define SH_GIGA_ETH_BASE       0xfee00000
+#define SH_GIGA_ETH_BASE       0xfee00000UL
 #define GIGA_MALR(port)                (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
 #define GIGA_MAHR(port)                (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
 static void sh_eth_chip_reset_giga(struct net_device *ndev)
@@ -516,52 +491,6 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
        }
 }
 
-static int sh_eth_is_gether(struct sh_eth_private *mdp);
-static int sh_eth_reset(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       int ret = 0;
-
-       if (sh_eth_is_gether(mdp)) {
-               sh_eth_write(ndev, 0x03, EDSR);
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
-                               EDMR);
-
-               ret = sh_eth_check_reset(ndev);
-               if (ret)
-                       goto out;
-
-               /* Table Init */
-               sh_eth_write(ndev, 0x0, TDLAR);
-               sh_eth_write(ndev, 0x0, TDFAR);
-               sh_eth_write(ndev, 0x0, TDFXR);
-               sh_eth_write(ndev, 0x0, TDFFR);
-               sh_eth_write(ndev, 0x0, RDLAR);
-               sh_eth_write(ndev, 0x0, RDFAR);
-               sh_eth_write(ndev, 0x0, RDFXR);
-               sh_eth_write(ndev, 0x0, RDFFR);
-       } else {
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
-                               EDMR);
-               mdelay(3);
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
-                               EDMR);
-       }
-
-out:
-       return ret;
-}
-
-static void sh_eth_set_duplex_giga(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
-}
-
 static void sh_eth_set_rate_giga(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -582,9 +511,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
 }
 
 /* SH7757(GETHERC) */
-static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+static struct sh_eth_cpu_data sh7757_data_giga = {
        .chip_reset     = sh_eth_chip_reset_giga,
-       .set_duplex     = sh_eth_set_duplex_giga,
+       .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_giga,
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
@@ -595,11 +524,10 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
                          EESR_ECI,
-       .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
-                         EESR_TFE,
        .fdr_value      = 0x0000072f,
        .rmcr_value     = 0x00000001,
 
+       .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
@@ -612,19 +540,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
        .tsu            = 1,
 };
 
-static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
-{
-       if (sh_eth_is_gether(mdp))
-               return &sh_eth_my_cpu_data_giga;
-       else
-               return &sh_eth_my_cpu_data;
-}
-
-#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
-#define SH_ETH_HAS_TSU 1
-static int sh_eth_check_reset(struct net_device *ndev);
-static void sh_eth_reset_hw_crc(struct net_device *ndev);
-
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -634,17 +549,7 @@ static void sh_eth_chip_reset(struct net_device *ndev)
        mdelay(1);
 }
 
-static void sh_eth_set_duplex(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
-}
-
-static void sh_eth_set_rate(struct net_device *ndev)
+static void sh_eth_set_rate_gether(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
@@ -663,11 +568,11 @@ static void sh_eth_set_rate(struct net_device *ndev)
        }
 }
 
-/* sh7763 */
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+/* SH7734 */
+static struct sh_eth_cpu_data sh7734_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
-       .set_rate       = sh_eth_set_rate,
+       .set_rate       = sh_eth_set_rate_gether,
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
@@ -677,8 +582,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
                          EESR_ECI,
-       .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
-                         EESR_TFE,
 
        .apr            = 1,
        .mpr            = 1,
@@ -688,54 +591,37 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
-#if defined(CONFIG_CPU_SUBTYPE_SH7734)
-       .hw_crc     = 1,
-       .select_mii = 1,
-#endif
+       .hw_crc         = 1,
+       .select_mii     = 1,
 };
 
-static int sh_eth_reset(struct net_device *ndev)
-{
-       int ret = 0;
-
-       sh_eth_write(ndev, EDSR_ENALL, EDSR);
-       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
-
-       ret = sh_eth_check_reset(ndev);
-       if (ret)
-               goto out;
+/* SH7763 */
+static struct sh_eth_cpu_data sh7763_data = {
+       .chip_reset     = sh_eth_chip_reset,
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_gether,
 
-       /* Table Init */
-       sh_eth_write(ndev, 0x0, TDLAR);
-       sh_eth_write(ndev, 0x0, TDFAR);
-       sh_eth_write(ndev, 0x0, TDFXR);
-       sh_eth_write(ndev, 0x0, TDFFR);
-       sh_eth_write(ndev, 0x0, RDLAR);
-       sh_eth_write(ndev, 0x0, RDFAR);
-       sh_eth_write(ndev, 0x0, RDFXR);
-       sh_eth_write(ndev, 0x0, RDFFR);
-
-       /* Reset HW CRC register */
-       sh_eth_reset_hw_crc(ndev);
-
-       /* Select MII mode */
-       if (sh_eth_my_cpu_data.select_mii)
-               sh_eth_select_mii(ndev);
-out:
-       return ret;
-}
+       .ecsr_value     = ECSR_ICD | ECSR_MPD,
+       .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
-static void sh_eth_reset_hw_crc(struct net_device *ndev)
-{
-       if (sh_eth_my_cpu_data.hw_crc)
-               sh_eth_write(ndev, 0x0, CSMR);
-}
+       .tx_check       = EESR_TC1 | EESR_FTC,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+                         EESR_ECI,
 
-#elif defined(CONFIG_ARCH_R8A7740)
-#define SH_ETH_HAS_TSU 1
-static int sh_eth_check_reset(struct net_device *ndev);
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .bculr          = 1,
+       .hw_swap        = 1,
+       .no_trimd       = 1,
+       .no_ade         = 1,
+       .tsu            = 1,
+       .irq_flags      = IRQF_SHARED,
+};
 
-static void sh_eth_chip_reset(struct net_device *ndev)
+static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
@@ -746,65 +632,11 @@ static void sh_eth_chip_reset(struct net_device *ndev)
        sh_eth_select_mii(ndev);
 }
 
-static int sh_eth_reset(struct net_device *ndev)
-{
-       int ret = 0;
-
-       sh_eth_write(ndev, EDSR_ENALL, EDSR);
-       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
-
-       ret = sh_eth_check_reset(ndev);
-       if (ret)
-               goto out;
-
-       /* Table Init */
-       sh_eth_write(ndev, 0x0, TDLAR);
-       sh_eth_write(ndev, 0x0, TDFAR);
-       sh_eth_write(ndev, 0x0, TDFXR);
-       sh_eth_write(ndev, 0x0, TDFFR);
-       sh_eth_write(ndev, 0x0, RDLAR);
-       sh_eth_write(ndev, 0x0, RDFAR);
-       sh_eth_write(ndev, 0x0, RDFXR);
-       sh_eth_write(ndev, 0x0, RDFFR);
-
-out:
-       return ret;
-}
-
-static void sh_eth_set_duplex(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
-}
-
-static void sh_eth_set_rate(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-
-       switch (mdp->speed) {
-       case 10: /* 10BASE */
-               sh_eth_write(ndev, GECMR_10, GECMR);
-               break;
-       case 100:/* 100BASE */
-               sh_eth_write(ndev, GECMR_100, GECMR);
-               break;
-       case 1000: /* 1000BASE */
-               sh_eth_write(ndev, GECMR_1000, GECMR);
-               break;
-       default:
-               break;
-       }
-}
-
 /* R8A7740 */
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
-       .chip_reset     = sh_eth_chip_reset,
+static struct sh_eth_cpu_data r8a7740_data = {
+       .chip_reset     = sh_eth_chip_reset_r8a7740,
        .set_duplex     = sh_eth_set_duplex,
-       .set_rate       = sh_eth_set_rate,
+       .set_rate       = sh_eth_set_rate_gether,
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
@@ -814,8 +646,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
                          EESR_ECI,
-       .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
-                         EESR_TFE,
 
        .apr            = 1,
        .mpr            = 1,
@@ -826,11 +656,10 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .no_ade         = 1,
        .tsu            = 1,
        .select_mii     = 1,
+       .shift_rd0      = 1,
 };
 
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define SH_ETH_RESET_DEFAULT   1
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+static struct sh_eth_cpu_data sh7619_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
        .apr            = 1,
@@ -838,14 +667,11 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
 };
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-#define SH_ETH_RESET_DEFAULT   1
-#define SH_ETH_HAS_TSU 1
-static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+
+static struct sh_eth_cpu_data sh771x_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .tsu            = 1,
 };
-#endif
 
 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 {
@@ -870,22 +696,8 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 
        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
-
-       if (!cd->tx_error_check)
-               cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
 }
 
-#if defined(SH_ETH_RESET_DEFAULT)
-/* Chip Reset */
-static int  sh_eth_reset(struct net_device *ndev)
-{
-       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
-       mdelay(3);
-       sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
-
-       return 0;
-}
-#else
 static int sh_eth_check_reset(struct net_device *ndev)
 {
        int ret = 0;
@@ -897,13 +709,55 @@ static int sh_eth_check_reset(struct net_device *ndev)
                mdelay(1);
                cnt--;
        }
-       if (cnt < 0) {
-               pr_err("Device reset fail\n");
+       if (cnt <= 0) {
+               pr_err("Device reset failed\n");
                ret = -ETIMEDOUT;
        }
        return ret;
 }
-#endif
+
+static int sh_eth_reset(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int ret = 0;
+
+       if (sh_eth_is_gether(mdp)) {
+               sh_eth_write(ndev, EDSR_ENALL, EDSR);
+               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
+                            EDMR);
+
+               ret = sh_eth_check_reset(ndev);
+               if (ret)
+                       goto out;
+
+               /* Table Init */
+               sh_eth_write(ndev, 0x0, TDLAR);
+               sh_eth_write(ndev, 0x0, TDFAR);
+               sh_eth_write(ndev, 0x0, TDFXR);
+               sh_eth_write(ndev, 0x0, TDFFR);
+               sh_eth_write(ndev, 0x0, RDLAR);
+               sh_eth_write(ndev, 0x0, RDFAR);
+               sh_eth_write(ndev, 0x0, RDFXR);
+               sh_eth_write(ndev, 0x0, RDFFR);
+
+               /* Reset HW CRC register */
+               if (mdp->cd->hw_crc)
+                       sh_eth_write(ndev, 0x0, CSMR);
+
+               /* Select MII mode */
+               if (mdp->cd->select_mii)
+                       sh_eth_select_mii(ndev);
+       } else {
+               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
+                            EDMR);
+               mdelay(3);
+               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
+                            EDMR);
+       }
+
+out:
+       return ret;
+}
 
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
@@ -979,14 +833,6 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
        }
 }
 
-static int sh_eth_is_gether(struct sh_eth_private *mdp)
-{
-       if (mdp->reg_offset == sh_eth_offset_gigabit)
-               return 1;
-       else
-               return 0;
-}
-
 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
 {
        if (sh_eth_is_gether(mdp))
@@ -1385,7 +1231,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;
@@ -1393,6 +1239,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
        int entry = mdp->cur_rx % mdp->num_rx_ring;
        int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
        struct sk_buff *skb;
+       int exceeded = 0;
        u16 pkt_len = 0;
        u32 desc_status;
 
@@ -1401,16 +1248,28 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;
 
-#if defined(CONFIG_ARCH_R8A7740)
-               desc_status >>= 16;
-#endif
-
                if (--boguscnt < 0)
                        break;
 
+               if (*quota <= 0) {
+                       exceeded = 1;
+                       break;
+               }
+               (*quota)--;
+
                if (!(desc_status & RDFEND))
                        ndev->stats.rx_length_errors++;
 
+               /*
+                * On almost all GETHER/ETHER controllers, the Receive Frame
+                * State (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
+                * On the R8A7740's GETHER, however, the RFS bits occupy bits
+                * 25 to 16, so the driver must shift the descriptor status
+                * right by 16.
+                */
+               if (mdp->cd->shift_rd0)
+                       desc_status >>= 16;
+
                if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
                                   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
                        ndev->stats.rx_errors++;
@@ -1484,7 +1343,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
                sh_eth_write(ndev, EDRRR_R, EDRRR);
        }
 
-       return 0;
+       return exceeded;
 }
 
 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
@@ -1625,7 +1484,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
-       unsigned long intr_status;
+       unsigned long intr_status, intr_enable;
 
        spin_lock(&mdp->lock);
 
@@ -1636,34 +1495,41 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
         * and we need to fully handle it in sh_eth_error() in order to quench
         * it as it doesn't get cleared by just writing 1 to the ECI bit...
         */
-       intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
-       /* Clear interrupt */
-       if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
-                       EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
-                       cd->tx_check | cd->eesr_err_check)) {
-               sh_eth_write(ndev, intr_status, EESR);
+       intr_enable = sh_eth_read(ndev, EESIPR);
+       intr_status &= intr_enable | DMAC_M_ECI;
+       if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
                ret = IRQ_HANDLED;
-       else
+       else
                goto other_irq;
 
-       if (intr_status & (EESR_FRC | /* Frame recv*/
-                       EESR_RMAF | /* Multi cast address recv*/
-                       EESR_RRF  | /* Bit frame recv */
-                       EESR_RTLF | /* Long frame recv*/
-                       EESR_RTSF | /* short frame recv */
-                       EESR_PRE  | /* PHY-LSI recv error */
-                       EESR_CERF)){ /* recv frame CRC error */
-               sh_eth_rx(ndev, intr_status);
+       if (intr_status & EESR_RX_CHECK) {
+               if (napi_schedule_prep(&mdp->napi)) {
+                       /* Mask Rx interrupts */
+                       sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
+                                    EESIPR);
+                       __napi_schedule(&mdp->napi);
+               } else {
+                       dev_warn(&ndev->dev,
+                                "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
+                                intr_status, intr_enable);
+               }
        }
 
        /* Tx Check */
        if (intr_status & cd->tx_check) {
+               /* Clear Tx interrupts */
+               sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
+
                sh_eth_txfree(ndev);
                netif_wake_queue(ndev);
        }
 
-       if (intr_status & cd->eesr_err_check)
+       if (intr_status & cd->eesr_err_check) {
+               /* Clear error interrupts */
+               sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
+
                sh_eth_error(ndev, intr_status);
+       }
 
 other_irq:
        spin_unlock(&mdp->lock);
@@ -1671,6 +1537,33 @@ other_irq:
        return ret;
 }
 
+static int sh_eth_poll(struct napi_struct *napi, int budget)
+{
+       struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
+                                                 napi);
+       struct net_device *ndev = napi->dev;
+       int quota = budget;
+       unsigned long intr_status;
+
+       for (;;) {
+               intr_status = sh_eth_read(ndev, EESR);
+               if (!(intr_status & EESR_RX_CHECK))
+                       break;
+               /* Clear Rx interrupts */
+               sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
+
+               if (sh_eth_rx(ndev, intr_status, &quota))
+                       goto out;
+       }
+
+       napi_complete(napi);
+
+       /* Reenable Rx interrupts */
+       sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+out:
+       return budget - quota;
+}
+
 /* PHY state control function */
 static void sh_eth_adjust_link(struct net_device *ndev)
 {
@@ -1961,14 +1854,7 @@ static int sh_eth_open(struct net_device *ndev)
        pm_runtime_get_sync(&mdp->pdev->dev);
 
        ret = request_irq(ndev->irq, sh_eth_interrupt,
-#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-       defined(CONFIG_CPU_SUBTYPE_SH7764) || \
-       defined(CONFIG_CPU_SUBTYPE_SH7757)
-                               IRQF_SHARED,
-#else
-                               0,
-#endif
-                               ndev->name, ndev);
+                         mdp->cd->irq_flags, ndev->name, ndev);
        if (ret) {
                dev_err(&ndev->dev, "Can not assign IRQ number\n");
                return ret;
@@ -1989,6 +1875,8 @@ static int sh_eth_open(struct net_device *ndev)
        if (ret)
                goto out_free_irq;
 
+       napi_enable(&mdp->napi);
+
        return ret;
 
 out_free_irq:
@@ -2084,6 +1972,8 @@ static int sh_eth_close(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
+       napi_disable(&mdp->napi);
+
        netif_stop_queue(ndev);
 
        /* Disable interrupts by clearing the interrupt mask. */
@@ -2154,7 +2044,6 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
        return phy_mii_ioctl(phydev, rq, cmd);
 }
 
-#if defined(SH_ETH_HAS_TSU)
 /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
                                            int entry)
@@ -2497,7 +2386,6 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
 
        return 0;
 }
-#endif /* SH_ETH_HAS_TSU */
 
 /* SuperH's TSU register init function */
 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
@@ -2641,11 +2529,21 @@ static const struct net_device_ops sh_eth_netdev_ops = {
        .ndo_stop               = sh_eth_close,
        .ndo_start_xmit         = sh_eth_start_xmit,
        .ndo_get_stats          = sh_eth_get_stats,
-#if defined(SH_ETH_HAS_TSU)
+       .ndo_tx_timeout         = sh_eth_tx_timeout,
+       .ndo_do_ioctl           = sh_eth_do_ioctl,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+};
+
+static const struct net_device_ops sh_eth_netdev_ops_tsu = {
+       .ndo_open               = sh_eth_open,
+       .ndo_stop               = sh_eth_close,
+       .ndo_start_xmit         = sh_eth_start_xmit,
+       .ndo_get_stats          = sh_eth_get_stats,
        .ndo_set_rx_mode        = sh_eth_set_multicast_list,
        .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
-#endif
        .ndo_tx_timeout         = sh_eth_tx_timeout,
        .ndo_do_ioctl           = sh_eth_do_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
@@ -2660,6 +2558,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        struct net_device *ndev = NULL;
        struct sh_eth_private *mdp = NULL;
        struct sh_eth_plat_data *pd = pdev->dev.platform_data;
+       const struct platform_device_id *id = platform_get_device_id(pdev);
 
        /* get base addr */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2718,15 +2617,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
 
        /* set cpu data */
-#if defined(SH_ETH_HAS_BOTH_MODULES)
-       mdp->cd = sh_eth_get_cpu_data(mdp);
-#else
-       mdp->cd = &sh_eth_my_cpu_data;
-#endif
+       mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
        sh_eth_set_default_cpu_data(mdp->cd);
 
        /* set function */
-       ndev->netdev_ops = &sh_eth_netdev_ops;
+       if (mdp->cd->tsu)
+               ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
+       else
+               ndev->netdev_ops = &sh_eth_netdev_ops;
        SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
        ndev->watchdog_timeo = TX_TIMEOUT;
 
@@ -2745,11 +2643,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        if (mdp->cd->tsu) {
                struct resource *rtsu;
                rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               if (!rtsu) {
-                       dev_err(&pdev->dev, "Not found TSU resource\n");
-                       ret = -ENODEV;
-                       goto out_release;
-               }
                mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
                if (IS_ERR(mdp->tsu_addr)) {
                        ret = PTR_ERR(mdp->tsu_addr);
@@ -2770,10 +2663,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                }
        }
 
+       netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
+
        /* network device register */
        ret = register_netdev(ndev);
        if (ret)
-               goto out_release;
+               goto out_napi_del;
 
        /* mdio bus init */
        ret = sh_mdio_init(ndev, pdev->id, pd);
@@ -2791,6 +2686,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 out_unregister:
        unregister_netdev(ndev);
 
+out_napi_del:
+       netif_napi_del(&mdp->napi);
+
 out_release:
        /* net_dev free */
        if (ndev)
@@ -2803,16 +2701,18 @@ out:
 static int sh_eth_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct sh_eth_private *mdp = netdev_priv(ndev);
 
        sh_mdio_release(ndev);
        unregister_netdev(ndev);
+       netif_napi_del(&mdp->napi);
        pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int sh_eth_runtime_nop(struct device *dev)
 {
        /*
@@ -2826,17 +2726,36 @@ static int sh_eth_runtime_nop(struct device *dev)
        return 0;
 }
 
-static struct dev_pm_ops sh_eth_dev_pm_ops = {
+static const struct dev_pm_ops sh_eth_dev_pm_ops = {
        .runtime_suspend = sh_eth_runtime_nop,
        .runtime_resume = sh_eth_runtime_nop,
 };
+#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
+#else
+#define SH_ETH_PM_OPS NULL
+#endif
+
+static struct platform_device_id sh_eth_id_table[] = {
+       { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
+       { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
+       { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
+       { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
+       { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
+       { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
+       { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
+       { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
+       { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
+       { }
+};
+MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
 
 static struct platform_driver sh_eth_driver = {
        .probe = sh_eth_drv_probe,
        .remove = sh_eth_drv_remove,
+       .id_table = sh_eth_id_table,
        .driver = {
                   .name = CARDNAME,
-                  .pm = &sh_eth_dev_pm_ops,
+                  .pm = SH_ETH_PM_OPS,
        },
 };
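With this change the per-SoC sh_eth_cpu_data is chosen purely by platform device name through sh_eth_id_table and platform_get_device_id(), instead of CONFIG_CPU_SUBTYPE_* ifdefs. A minimal sketch of how SoC or board setup code could register such a device so that, for example, sh7724_data is selected at probe time; the register base, size, IRQ number and platform data contents below are illustrative placeholders, not values taken from any real board file:

static struct sh_eth_plat_data sh_eth_plat = {
	/* board-specific settings (PHY address, EDMAC endianness, ...) */
};

static struct resource sh_eth_resources[] = {
	DEFINE_RES_MEM(0xfef00000, 0x200),	/* EtherC/E-DMAC registers (placeholder) */
	DEFINE_RES_IRQ(91),			/* placeholder IRQ number */
};

static struct platform_device sh_eth_device = {
	.name		= "sh7724-ether",	/* must match an sh_eth_id_table entry */
	.id		= 0,
	.resource	= sh_eth_resources,
	.num_resources	= ARRAY_SIZE(sh_eth_resources),
	.dev		= {
		.platform_data	= &sh_eth_plat,
	},
};

Registering this with platform_device_register() binds it to sh_eth_driver via the id table, and probe() then picks up sh7724_data from id->driver_data.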
 
index 1ddc9f235bcb393cd915764764eb3ab53f313e61..a78fb0c424f86ea17bbcb3629ac1dfd0e83feb8e 100644 (file)
@@ -166,19 +166,16 @@ enum {
 /*
  * Register's bits
  */
-#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
-    defined(CONFIG_ARCH_R8A7740)
-/* EDSR */
+/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
 enum EDSR_BIT {
        EDSR_ENT = 0x01, EDSR_ENR = 0x02,
 };
 #define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
 
-/* GECMR */
+/* GECMR : sh7734, sh7763 and r8a7740 only */
 enum GECMR_BIT {
        GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
 };
-#endif
 
 /* EDMR */
 enum DMAC_M_BIT {
@@ -251,13 +248,19 @@ enum EESR_BIT {
        EESR_CERF       = 0x00000001,
 };
 
+#define EESR_RX_CHECK          (EESR_FRC  | /* Frame recv */           \
+                                EESR_RMAF | /* Multicast address recv */ \
+                                EESR_RRF  | /* Bit frame recv */       \
+                                EESR_RTLF | /* Long frame recv */      \
+                                EESR_RTSF | /* Short frame recv */     \
+                                EESR_PRE  | /* PHY-LSI recv error */   \
+                                EESR_CERF)  /* Recv frame CRC error */
+
 #define DEFAULT_TX_CHECK       (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
                                 EESR_RTO)
 #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
                                 EESR_RDE | EESR_RFRMER | EESR_ADE | \
                                 EESR_TFE | EESR_TDE | EESR_ECI)
-#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
-                                EESR_TFE)
 
 /* EESIPR */
 enum DMAC_IM_BIT {
@@ -299,11 +302,11 @@ enum FCFTR_BIT {
 #define DEFAULT_FIFO_F_D_RFF   (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
 #define DEFAULT_FIFO_F_D_RFD   (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
 
-/* Transfer descriptor bit */
+/* Transmit descriptor bit */
 enum TD_STS_BIT {
-       TD_TACT = 0x80000000,
-       TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
-       TD_TFP0 = 0x10000000,
+       TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
+       TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
+       TD_TFE  = 0x08000000, TD_TWBI = 0x04000000,
 };
 #define TDF1ST TD_TFP1
 #define TDFEND TD_TFP0
@@ -463,9 +466,9 @@ struct sh_eth_cpu_data {
        /* interrupt checking mask */
        unsigned long tx_check;
        unsigned long eesr_err_check;
-       unsigned long tx_error_check;
 
        /* hardware features */
+       unsigned long irq_flags;        /* IRQ configuration flags */
        unsigned no_psr:1;              /* EtherC DO NOT have PSR */
        unsigned apr:1;                 /* EtherC have APR */
        unsigned mpr:1;                 /* EtherC have MPR */
@@ -478,6 +481,7 @@ struct sh_eth_cpu_data {
        unsigned no_ade:1;      /* E-DMAC DO NOT have ADE bit in EESR */
        unsigned hw_crc:1;      /* E-DMAC have CSMR */
        unsigned select_mii:1;  /* EtherC have RMII_MII (MII select register) */
+       unsigned shift_rd0:1;   /* shift Rx descriptor word 0 right by 16 */
 };
 
 struct sh_eth_private {
@@ -499,6 +503,7 @@ struct sh_eth_private {
        u32 cur_tx, dirty_tx;
        u32 rx_buf_sz;          /* Based on MTU+slack. */
        int edmac_endian;
+       struct napi_struct napi;
        /* MII transceiver section. */
        u32 phy_id;                                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
index b6739afeaca1517bf3351dae9e3a33469cccc46e..a99739c5142cb1f296a7fa8ea83a4efca8f68ea6 100644 (file)
@@ -1040,7 +1040,6 @@ static int s6gmac_remove(struct platform_device *pdev)
                unregister_netdev(dev);
                free_irq(dev->irq, dev);
                free_netdev(dev);
-               platform_set_drvdata(pdev, NULL);
        }
        return 0;
 }
index 0ad5694b41f8eb12d43d2e518b89607ef373bbea..856e523ac936edfbd4f32c55335fc13b1e2443d8 100644 (file)
@@ -818,7 +818,6 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
        free_netdev(dev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 01b99206139a0acefeb9452ad62b55346c6278f6..e7284a2caffa89db39f2e2e683e59dc62d7d57d1 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
-#include <linux/cpu_rmap.h>
 #include <linux/aer.h>
+#include <linux/interrupt.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "nic.h"
@@ -638,14 +638,16 @@ static void efx_start_datapath(struct efx_nic *efx)
                           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                           efx->type->rx_buffer_padding);
        rx_buf_len = (sizeof(struct efx_rx_page_state) +
-                     EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+                     NET_IP_ALIGN + efx->rx_dma_len);
        if (rx_buf_len <= PAGE_SIZE) {
                efx->rx_scatter = false;
                efx->rx_buffer_order = 0;
        } else if (efx->type->can_rx_scatter) {
+               BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
                BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
-                            EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
-                            PAGE_SIZE / 2);
+                            2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+                                      EFX_RX_BUF_ALIGNMENT) >
+                            PAGE_SIZE);
                efx->rx_scatter = true;
                efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
                efx->rx_buffer_order = 0;
@@ -1281,29 +1283,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
        return count;
 }
 
-static int
-efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
-{
-#ifdef CONFIG_RFS_ACCEL
-       unsigned int i;
-       int rc;
-
-       efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
-       if (!efx->net_dev->rx_cpu_rmap)
-               return -ENOMEM;
-       for (i = 0; i < efx->n_rx_channels; i++) {
-               rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
-                                     xentries[i].vector);
-               if (rc) {
-                       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
-                       efx->net_dev->rx_cpu_rmap = NULL;
-                       return rc;
-               }
-       }
-#endif
-       return 0;
-}
-
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
@@ -1357,11 +1336,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                                efx->n_tx_channels = n_channels;
                                efx->n_rx_channels = n_channels;
                        }
-                       rc = efx_init_rx_cpu_rmap(efx, xentries);
-                       if (rc) {
-                               pci_disable_msix(efx->pci_dev);
-                               return rc;
-                       }
                        for (i = 0; i < efx->n_channels; i++)
                                efx_get_channel(efx, i)->irq =
                                        xentries[i].vector;
@@ -1425,6 +1399,10 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 
        BUG_ON(efx->state == STATE_DISABLED);
 
+       if (efx->eeh_disabled_legacy_irq) {
+               enable_irq(efx->legacy_irq);
+               efx->eeh_disabled_legacy_irq = false;
+       }
        if (efx->legacy_irq)
                efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
@@ -2118,7 +2096,7 @@ static void efx_update_name(struct efx_nic *efx)
 static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
 {
-       struct net_device *net_dev = ptr;
+       struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
 
        if (net_dev->netdev_ops == &efx_netdev_ops &&
            event == NETDEV_CHANGENAME)
@@ -2363,7 +2341,7 @@ out:
  * Returns 0 if the recovery mechanisms are unsuccessful.
  * Returns a non-zero value otherwise.
  */
-static int efx_try_recovery(struct efx_nic *efx)
+int efx_try_recovery(struct efx_nic *efx)
 {
 #ifdef CONFIG_EEH
        /* A PCI error can occur and not be seen by EEH because nothing
@@ -2601,10 +2579,6 @@ static void efx_pci_remove_main(struct efx_nic *efx)
        BUG_ON(efx->state == STATE_READY);
        cancel_work_sync(&efx->reset_work);
 
-#ifdef CONFIG_RFS_ACCEL
-       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
-       efx->net_dev->rx_cpu_rmap = NULL;
-#endif
        efx_stop_interrupts(efx, false);
        efx_nic_fini_interrupt(efx);
        efx_fini_port(efx);
index 8372da239b43eef9928fa6d207b60a774a83aa5c..bdb30bbb0c973f13d05e86491b20f9a1e5470111 100644 (file)
@@ -124,6 +124,7 @@ extern const struct ethtool_ops efx_ethtool_ops;
 extern int efx_reset(struct efx_nic *efx, enum reset_type method);
 extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
 extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+extern int efx_try_recovery(struct efx_nic *efx);
 
 /* Global */
 extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
index 6e768175e7e00b6b4864088b094cd45a167782a9..1fc21458413d50ade70be1ff1ee9481aca7cf987 100644 (file)
@@ -1114,6 +1114,20 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
        return 0;
 }
 
+int efx_ethtool_get_ts_info(struct net_device *net_dev,
+                           struct ethtool_ts_info *ts_info)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       /* Software capabilities */
+       ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
+                                   SOF_TIMESTAMPING_SOFTWARE);
+       ts_info->phc_index = -1;
+
+       efx_ptp_get_ts_info(efx, ts_info);
+       return 0;
+}
+
 static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
                                         struct ethtool_eeprom *ee,
                                         u8 *data)
@@ -1176,7 +1190,7 @@ const struct ethtool_ops efx_ethtool_ops = {
        .get_rxfh_indir_size    = efx_ethtool_get_rxfh_indir_size,
        .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
        .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
-       .get_ts_info            = efx_ptp_get_ts_info,
+       .get_ts_info            = efx_ethtool_get_ts_info,
        .get_module_info        = efx_ethtool_get_module_info,
        .get_module_eeprom      = efx_ethtool_get_module_eeprom,
 };
index 2397f0e8d3ebf7e50c75e13cfeccc9b3c24237fa..b74a60ab9ac79913111a4f79d4a5a8ca1c723862 100644 (file)
@@ -1185,8 +1185,21 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 
        nhoff = skb_network_offset(skb);
 
-       if (skb->protocol != htons(ETH_P_IP))
+       if (skb->protocol == htons(ETH_P_8021Q)) {
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+                                   nhoff + sizeof(struct vlan_hdr));
+               if (((const struct vlan_hdr *)skb->data + nhoff)->
+                   h_vlan_encapsulated_proto != htons(ETH_P_IP))
+                       return -EPROTONOSUPPORT;
+
+               /* This is IP over 802.1q VLAN.  We can't filter on the
+                * IP 5-tuple and the vlan together, so just strip the
+                * vlan header and filter on the IP part.
+                */
+               nhoff += sizeof(struct vlan_hdr);
+       } else if (skb->protocol != htons(ETH_P_IP)) {
                return -EPROTONOSUPPORT;
+       }
 
        /* RFS must validate the IP header length before calling us */
        EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
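For reference, a self-contained sketch of pulling the encapsulated EtherType out of an 802.1Q header that starts at a known byte offset into the packet; the helper name is invented for illustration and is not part of the sfc driver:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Return the protocol carried inside the VLAN tag located at byte offset
 * 'nhoff' of the linear data area.  The caller must already have checked
 * that at least sizeof(struct vlan_hdr) bytes are present at that offset.
 */
static __be16 vlan_encap_proto_at(const struct sk_buff *skb, unsigned int nhoff)
{
	const struct vlan_hdr *vh = (const struct vlan_hdr *)(skb->data + nhoff);

	return vh->h_vlan_encapsulated_proto;
}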
index 9bd433a095c57b8a127b24a7ec5e55c60a50b24b..f4c7e6b67743b93eaa8d2c318705a05dd3302f8b 100644 (file)
 /* Maximum possible MTU the driver supports */
 #define EFX_MAX_MTU (9 * 1024)
 
-/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page. */
-#define EFX_RX_USR_BUF_SIZE 1824
+/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE    (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer.  Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT   L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT   4
+#endif
 
 /* Forward declare Precision Time Protocol (PTP) support structure. */
 struct efx_ptp_data;
@@ -231,6 +243,7 @@ struct efx_rx_buffer {
 #define EFX_RX_BUF_LAST_IN_PAGE        0x0001
 #define EFX_RX_PKT_CSUMMED     0x0002
 #define EFX_RX_PKT_DISCARD     0x0004
+#define EFX_RX_PKT_TCP         0x0040
 
 /**
  * struct efx_rx_page_state - Page-based rx buffer state
@@ -467,25 +480,12 @@ enum nic_state {
        STATE_RECOVERY = 3,     /* device recovering from PCI error */
 };
 
-/*
- * Alignment of page-allocated RX buffers
- *
- * Controls the number of bytes inserted at the start of an RX buffer.
- * This is the equivalent of NET_IP_ALIGN [which controls the alignment
- * of the skb->head for hardware DMA].
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define EFX_PAGE_IP_ALIGN 0
-#else
-#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
-#endif
-
 /*
  * Alignment of the skb->head which wraps a page-allocated RX buffer
  *
  * The skb allocated to wrap an rx_buffer can have this alignment. Since
  * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * EFX_PAGE_IP_ALIGN.
+ * NET_IP_ALIGN.
  */
 #define EFX_PAGE_SKB_ALIGN 2
 
@@ -785,9 +785,11 @@ struct efx_nic {
 
        char name[IFNAMSIZ];
        struct pci_dev *pci_dev;
+       unsigned int port_num;
        const struct efx_nic_type *type;
        int legacy_irq;
        bool legacy_irq_enabled;
+       bool eeh_disabled_legacy_irq;
        struct workqueue_struct *workqueue;
        char workqueue_name[16];
        struct work_struct reset_work;
@@ -917,7 +919,7 @@ static inline int efx_dev_registered(struct efx_nic *efx)
 
 static inline unsigned int efx_port_num(struct efx_nic *efx)
 {
-       return efx->net_dev->dev_id;
+       return efx->port_num;
 }
 
 /**
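The new RX buffer sizing is chosen so that two user buffers plus the struct efx_rx_page_state header still fit in one page while keeping each buffer cache-line aligned. A worked example of the arithmetic behind the BUILD_BUG_ON checks in efx_start_datapath(), assuming 4 KiB pages and 64-byte cache lines:

/*
 * EFX_RX_USR_BUF_SIZE = 2048 - 256 = 1792, and 1792 % 64 == 0, so the
 * EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES check passes.
 *
 * With NET_IP_ALIGN == 0: EFX_RX_BUF_ALIGNMENT == L1_CACHE_BYTES (64) and
 *   2 * ALIGN(0 + 1792, 64) = 3584, leaving 512 bytes of the 4096-byte
 *   page for struct efx_rx_page_state.
 *
 * With NET_IP_ALIGN == 2: EFX_RX_BUF_ALIGNMENT == 4 and
 *   2 * ALIGN(2 + 1792, 4) = 3592, which likewise fits within one page.
 */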
index b0503cd8c2a0807ad8becde51644d7054b56896d..56ed3bc71e00e0a4a24717e40d18918e4fd22fd6 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "bitfield.h"
 #include "efx.h"
@@ -1080,12 +1081,21 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
 
        if (likely(rx_ev_pkt_ok)) {
-               /* If packet is marked as OK and packet type is TCP/IP or
-                * UDP/IP, then we can rely on the hardware checksum.
+               /* If packet is marked as OK then we can rely on the
+                * hardware checksum and classification.
                 */
-               flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
-                        rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
-                       EFX_RX_PKT_CSUMMED : 0;
+               flags = 0;
+               switch (rx_ev_hdr_type) {
+               case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+                       flags |= EFX_RX_PKT_TCP;
+                       /* fall through */
+               case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+                       flags |= EFX_RX_PKT_CSUMMED;
+                       /* fall through */
+               case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+               case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+                       break;
+               }
        } else {
                flags = efx_handle_rx_not_ok(rx_queue, event);
        }
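
The switch above accumulates flags through deliberate fall-through: a TCP header type collects both the TCP and checksummed flags, UDP only the checksummed flag, anything else neither. The same pattern as a stand-alone sketch; the header-type names and flag values here are placeholders, not the FSE_*/EFX_* definitions.

    #include <stdio.h>

    #define RX_PKT_CSUMMED  0x0002
    #define RX_PKT_TCP      0x0040

    enum hdr_type { HDR_TCP, HDR_UDP, HDR_OTHER_IP, HDR_OTHER };

    static unsigned int rx_event_flags(enum hdr_type t)
    {
            unsigned int flags = 0;

            switch (t) {
            case HDR_TCP:
                    flags |= RX_PKT_TCP;
                    /* fall through */
            case HDR_UDP:
                    flags |= RX_PKT_CSUMMED;
                    /* fall through */
            default:
                    break;
            }
            return flags;
    }

    int main(void)
    {
            printf("tcp=%#x udp=%#x other=%#x\n",
                   rx_event_flags(HDR_TCP), rx_event_flags(HDR_UDP),
                   rx_event_flags(HDR_OTHER));
            return 0;
    }

The TCP flag set here is what later lets __efx_rx_packet() hand only TCP frames to GRO (see the rx.c hunk further down).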
@@ -1579,6 +1589,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
        efx_readd(efx, &reg, FR_BZ_INT_ISR0);
        queues = EFX_EXTRACT_DWORD(reg, 0, 31);
 
+       /* Legacy interrupts are disabled too late by the EEH kernel
+        * code. Disable them earlier.
+        * If an EEH error occurred, the read will have returned all ones.
+        */
+       if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+           !efx->eeh_disabled_legacy_irq) {
+               disable_irq_nosync(efx->legacy_irq);
+               efx->eeh_disabled_legacy_irq = true;
+       }
+
        /* Handle non-event-queue sources */
        if (queues & (1U << efx->irq_level)) {
                syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
@@ -1687,6 +1707,7 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 int efx_nic_init_interrupt(struct efx_nic *efx)
 {
        struct efx_channel *channel;
+       unsigned int n_irqs;
        int rc;
 
        if (!EFX_INT_MODE_USE_MSI(efx)) {
@@ -1707,7 +1728,19 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
                return 0;
        }
 
+#ifdef CONFIG_RFS_ACCEL
+       if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+               efx->net_dev->rx_cpu_rmap =
+                       alloc_irq_cpu_rmap(efx->n_rx_channels);
+               if (!efx->net_dev->rx_cpu_rmap) {
+                       rc = -ENOMEM;
+                       goto fail1;
+               }
+       }
+#endif
+
        /* Hook MSI or MSI-X interrupt */
+       n_irqs = 0;
        efx_for_each_channel(channel, efx) {
                rc = request_irq(channel->irq, efx_msi_interrupt,
                                 IRQF_PROBE_SHARED, /* Not shared */
@@ -1718,13 +1751,31 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
                                  "failed to hook IRQ %d\n", channel->irq);
                        goto fail2;
                }
+               ++n_irqs;
+
+#ifdef CONFIG_RFS_ACCEL
+               if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
+                   channel->channel < efx->n_rx_channels) {
+                       rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+                                             channel->irq);
+                       if (rc)
+                               goto fail2;
+               }
+#endif
        }
 
        return 0;
 
  fail2:
-       efx_for_each_channel(channel, efx)
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+       efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+       efx_for_each_channel(channel, efx) {
+               if (n_irqs-- == 0)
+                       break;
                free_irq(channel->irq, &efx->channel[channel->channel]);
+       }
  fail1:
        return rc;
 }
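
The reworked error path only unwinds what was actually acquired: n_irqs counts successfully hooked IRQs, and the fail2 loop stops once that many have been freed. The same count-and-unwind idea as a stand-alone sketch; all names below are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    #define N_CHANNELS 8

    static int hook_irq(int i)       { return i < 5 ? 0 : -1; }  /* fails at i == 5 */
    static void free_irq_stub(int i) { printf("freed irq %d\n", i); }

    int main(void)
    {
            int hooked = 0, i;

            for (i = 0; i < N_CHANNELS; i++) {
                    if (hook_irq(i))
                            goto fail;
                    hooked++;
            }
            return 0;
    fail:
            for (i = 0; i < N_CHANNELS; i++) {
                    if (hooked-- == 0)      /* free only what was hooked */
                            break;
                    free_irq_stub(i);
            }
            return EXIT_FAILURE;
    }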
@@ -1734,11 +1785,14 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
        struct efx_channel *channel;
        efx_oword_t reg;
 
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+       efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+
        /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel(channel, efx) {
-               if (channel->irq)
-                       free_irq(channel->irq, &efx->channel[channel->channel]);
-       }
+       efx_for_each_channel(channel, efx)
+               free_irq(channel->irq, &efx->channel[channel->channel]);
 
        /* ACK legacy interrupt */
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
index 1b0003323498502741f2c36d5437586a8e8176f6..d63c2991a75105767487aa66ae1cbe284fc6abc1 100644 (file)
@@ -254,8 +254,8 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
 struct ethtool_ts_info;
 extern void efx_ptp_probe(struct efx_nic *efx);
 extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern int efx_ptp_get_ts_info(struct net_device *net_dev,
-                              struct ethtool_ts_info *ts_info);
+extern void efx_ptp_get_ts_info(struct efx_nic *efx,
+                               struct ethtool_ts_info *ts_info);
 extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
index 9a95abf2dedfad3dce17f6739eaba93a23e4c105..b495394a6dfa7f7f64353caf5929c4a76825e34e 100644 (file)
@@ -1203,18 +1203,16 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
        return 0;
 }
 
-int
-efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
 {
-       struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_ptp_data *ptp = efx->ptp_data;
 
        if (!ptp)
-               return -EOPNOTSUPP;
+               return;
 
-       ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE |
-                                   SOF_TIMESTAMPING_RX_HARDWARE |
-                                   SOF_TIMESTAMPING_RAW_HARDWARE);
+       ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+                                    SOF_TIMESTAMPING_RX_HARDWARE |
+                                    SOF_TIMESTAMPING_RAW_HARDWARE);
        ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
        ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
        ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
@@ -1224,7 +1222,6 @@ efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
                               1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
                               1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
                               1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
-       return 0;
 }
 
 int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
index e73e30bac10e268a6f031d717a3551cda1ddeeae..65646cd7af8e60fc604de59b71cded97703b95b8 100644 (file)
@@ -36,7 +36,7 @@
 #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
 
 /* Size of buffer allocated for skb header area. */
-#define EFX_SKB_HEADERS  64u
+#define EFX_SKB_HEADERS  128u
 
 /* This is the percentage fill level below which new RX descriptors
  * will be added to the RX descriptor ring.
@@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
-                                     L1_CACHE_BYTES);
+       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+                                     EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
@@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
-                       rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+                       rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
                        rx_buf->page = page;
-                       rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+                       rx_buf->page_offset = page_offset + NET_IP_ALIGN;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
@@ -598,6 +598,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
+       if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
@@ -627,7 +629,7 @@ void __efx_rx_packet(struct efx_channel *channel)
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-       if (!channel->type->receive_skb)
+       if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
@@ -675,7 +677,7 @@ static void efx_init_rx_recycle_ring(struct efx_nic *efx,
 #ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
 #else
-       if (efx->pci_dev->dev.iommu_group)
+       if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
index 51669244d1548f416042f5f1c0670c6383599099..8c91775e3c5f19bcd51f409b8e6fca21133d84cf 100644 (file)
@@ -304,7 +304,7 @@ static int siena_probe_nic(struct efx_nic *efx)
        }
 
        efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
-       efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+       efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
 
        efx_mcdi_init(efx);
 
index c1c4bb868a3b06ba23faf5ec7dcd80d06034ed79..e832f46660c9edab927d7d335cd6be6a6ff7d989 100644 (file)
@@ -22,7 +22,6 @@ config SGI_IOC3_ETH
        bool "SGI IOC3 Ethernet"
        depends on PCI && SGI_IP27
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
index 7ed08c32a9c512621665d14566e3371f24108398..ffa78432164dd0f9a20e5ff66e67fedca1ab4425 100644 (file)
@@ -1398,16 +1398,6 @@ static struct pci_driver ioc3_driver = {
        .remove         = ioc3_remove_one,
 };
 
-static int __init ioc3_init_module(void)
-{
-       return pci_register_driver(&ioc3_driver);
-}
-
-static void __exit ioc3_cleanup_module(void)
-{
-       pci_unregister_driver(&ioc3_driver);
-}
-
 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned long data;
@@ -1677,9 +1667,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
        netif_wake_queue(dev);                  /* Let us get going again. */
 }
 
+module_pci_driver(ioc3_driver);
 MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
 MODULE_LICENSE("GPL");
-
-module_init(ioc3_init_module);
-module_exit(ioc3_cleanup_module);
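
module_pci_driver() generates the same init/exit pair that is being deleted here. Roughly (a paraphrase of the include/linux/pci.h helper, not its verbatim definition), the one-line form expands to:

    /* module_pci_driver(ioc3_driver) is approximately: */
    static int __init ioc3_driver_init(void)
    {
            return pci_register_driver(&ioc3_driver);
    }
    module_init(ioc3_driver_init);

    static void __exit ioc3_driver_exit(void)
    {
            pci_unregister_driver(&ioc3_driver);
    }
    module_exit(ioc3_driver_exit);

The same conversion is applied below to sc92031, sis190, sungem and tc35815.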
index 4bdbaad9932df37f1fb92e8eb3c5e76bc2efc6be..9f5f35e041ac599681576aa06b9ee24033cfbd12 100644 (file)
@@ -863,7 +863,6 @@ static int __exit meth_remove(struct platform_device *pdev)
 
        unregister_netdev(dev);
        free_netdev(dev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
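
This and several later hunks (smc911x, smc91x, smsc911x, stmmac, tsi108, cpsw, davinci_emac) drop the explicit platform_set_drvdata(pdev, NULL) from remove and error paths; the driver core now clears drvdata itself when a device is unbound, so a remove() no longer has to. A minimal illustration, for a hypothetical driver and assuming that driver-core behaviour:

    static int example_remove(struct platform_device *pdev)
    {
            struct net_device *ndev = platform_get_drvdata(pdev);

            unregister_netdev(ndev);
            free_netdev(ndev);
            /* no platform_set_drvdata(pdev, NULL) needed on the way out */
            return 0;
    }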
index 28f7268f1b8802eed15756784b5d68f298a7976a..5eb933c97bbacf26123d1857949515e6b04adbee 100644 (file)
@@ -1578,19 +1578,7 @@ static struct pci_driver sc92031_pci_driver = {
        .resume         = sc92031_resume,
 };
 
-static int __init sc92031_init(void)
-{
-       return pci_register_driver(&sc92031_pci_driver);
-}
-
-static void __exit sc92031_exit(void)
-{
-       pci_unregister_driver(&sc92031_pci_driver);
-}
-
-module_init(sc92031_init);
-module_exit(sc92031_exit);
-
+module_pci_driver(sc92031_pci_driver);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
 MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
index f1135cc1bd48abd5b60714c459922df7f762f06a..68d052b09af1e1ee9950f3b2e0944aca7a8e86c3 100644 (file)
@@ -22,7 +22,6 @@ config SIS900
        tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This is a driver for the Fast Ethernet PCI network cards based on
@@ -39,7 +38,6 @@ config SIS190
        tristate "SiS190/SiS191 gigabit ethernet support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
index 9a9c379420d120049bb43e53da5849be904d234d..02df0894690d698c7234e8057a58714ad65a2915 100644 (file)
@@ -1934,15 +1934,4 @@ static struct pci_driver sis190_pci_driver = {
        .remove         = sis190_remove_one,
 };
 
-static int __init sis190_init_module(void)
-{
-       return pci_register_driver(&sis190_pci_driver);
-}
-
-static void __exit sis190_cleanup_module(void)
-{
-       pci_unregister_driver(&sis190_pci_driver);
-}
-
-module_init(sis190_init_module);
-module_exit(sis190_cleanup_module);
+module_pci_driver(sis190_pci_driver);
index bb4c1674ff99b1df733baf3823f01ca565fe22a3..068fc44d37e1d0723849da40ade4d77671be0917 100644 (file)
@@ -37,7 +37,6 @@ config SMC9194
 config SMC91X
        tristate "SMC 91C9x/91C1xxx support"
        select CRC32
-       select NET_CORE
        select MII
        depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
                    MN10300 || COLDFIRE || ARM64)
@@ -57,7 +56,6 @@ config PCMCIA_SMC91C92
        tristate "SMC 91Cxx PCMCIA support"
        depends on PCMCIA
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
@@ -70,7 +68,6 @@ config EPIC100
        tristate "SMC EtherPower II"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
@@ -81,7 +78,6 @@ config EPIC100
 config SMC911X
        tristate "SMSC LAN911[5678] support"
        select CRC32
-       select NET_CORE
        select MII
        depends on (ARM || SUPERH || MN10300)
        ---help---
@@ -97,9 +93,8 @@ config SMC911X
 
 config SMSC911X
        tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-       depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300)
+       depends on HAS_IOMEM
        select CRC32
-       select NET_CORE
        select MII
        select PHYLIB
        ---help---
index 9dd842dbb8598b9d7825637238ce2d64aaa53f4a..345558fe7367fdeee216642981b0c632f734d9c0 100644 (file)
@@ -2087,7 +2087,6 @@ static int smc911x_drv_probe(struct platform_device *pdev)
        ndev->base_addr = res->start;
        ret = smc911x_probe(ndev);
        if (ret != 0) {
-               platform_set_drvdata(pdev, NULL);
                iounmap(addr);
 release_both:
                free_netdev(ndev);
@@ -2113,7 +2112,6 @@ static int smc911x_drv_remove(struct platform_device *pdev)
        struct resource *res;
 
        DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
-       platform_set_drvdata(pdev, NULL);
 
        unregister_netdev(ndev);
 
index dfbf978315dfb31a8cbbd01ce5beae06abc5f398..cde13be7c7ded5e5fc7265052693f6281dd7cd97 100644 (file)
@@ -2299,7 +2299,6 @@ static int smc_drv_probe(struct platform_device *pdev)
        return 0;
 
  out_iounmap:
-       platform_set_drvdata(pdev, NULL);
        iounmap(addr);
  out_release_attrib:
        smc_release_attrib(pdev, ndev);
@@ -2319,8 +2318,6 @@ static int smc_drv_remove(struct platform_device *pdev)
        struct smc_local *lp = netdev_priv(ndev);
        struct resource *res;
 
-       platform_set_drvdata(pdev, NULL);
-
        unregister_netdev(ndev);
 
        free_irq(ndev->irq, ndev);
index 3663b9e04a31345b3a0a02ba28e8c9111de11737..a1419211585bb150188262adec08291ad0a82f38 100644 (file)
@@ -2284,7 +2284,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
        mdiobus_unregister(pdata->mii_bus);
        mdiobus_free(pdata->mii_bus);
 
-       platform_set_drvdata(pdev, NULL);
        unregister_netdev(dev);
        free_irq(dev->irq, dev);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2539,7 +2538,6 @@ out_disable_resources:
 out_enable_resources_fail:
        smsc911x_free_resources(pdev);
 out_request_resources_fail:
-       platform_set_drvdata(pdev, NULL);
        iounmap(pdata->ioaddr);
        free_netdev(dev);
 out_release_io_1:
index f695a50bac47d99cdc141a3cf059dff340e54dc3..6e52c0f74cd9daad4ac5f80e1db98eec04958d55 100644 (file)
@@ -1,7 +1,6 @@
 config STMMAC_ETH
        tristate "STMicroelectronics 10/100/1000 Ethernet driver"
-       depends on HAS_IOMEM
-       select NET_CORE
+       depends on HAS_IOMEM && HAS_DMA
        select MII
        select PHYLIB
        select CRC32
index 618446ae1ec1b3b2ae924b2a5d083215c6b0be15..ee919ca8b8a0ad5828e84d7522d03218895d9b79 100644 (file)
@@ -1899,7 +1899,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef STMMAC_XMIT_DEBUG
        if (netif_msg_pktdata(priv)) {
-               pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d"
+               pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
                        __func__, (priv->cur_tx % txsize),
                        (priv->dirty_tx % txsize), entry, first, nfrags);
                if (priv->extend_desc)
index 1d3780f55ba2c9397641b1b5986b952680259e0c..17bc7827e7cafded7ce96bb4a024ac6d8ebf525d 100644 (file)
@@ -171,8 +171,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
        if (priv->plat->exit)
                priv->plat->exit(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        return ret;
 }
 
index 95cff98d8a34e2a4ba355b03554562bb76d23152..fa322409bff33376dbedc59f6921a2337a60eebc 100644 (file)
@@ -10108,7 +10108,7 @@ static int niu_of_probe(struct platform_device *op)
                goto err_out_iounmap;
        }
 
-       dev_set_drvdata(&op->dev, dev);
+       platform_set_drvdata(op, dev);
 
        niu_device_announce(np);
 
@@ -10145,7 +10145,7 @@ err_out:
 
 static int niu_of_remove(struct platform_device *op)
 {
-       struct net_device *dev = dev_get_drvdata(&op->dev);
+       struct net_device *dev = platform_get_drvdata(op);
 
        if (dev) {
                struct niu *np = netdev_priv(dev);
@@ -10175,7 +10175,6 @@ static int niu_of_remove(struct platform_device *op)
                niu_put_parent(np);
 
                free_netdev(dev);
-               dev_set_drvdata(&op->dev, NULL);
        }
        return 0;
 }
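
niu, sunhme and sunqe switch from dev_set_drvdata(&op->dev, x) to the platform_set_drvdata()/platform_get_drvdata() helpers. The helpers are thin wrappers over the same per-device drvdata field, roughly:

    static inline void *platform_get_drvdata(const struct platform_device *pdev)
    {
            return dev_get_drvdata(&pdev->dev);
    }

    static inline void platform_set_drvdata(struct platform_device *pdev,
                                            void *data)
    {
            dev_set_drvdata(&pdev->dev, data);
    }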
index 054975939a184cc48035009420e8d1663871b0f9..0d43fa9ff9801ccbc19a74510f9aef952e22a3f8 100644 (file)
@@ -995,7 +995,6 @@ static void bigmac_set_multicast(struct net_device *dev)
        struct bigmac *bp = netdev_priv(dev);
        void __iomem *bregs = bp->bregs;
        struct netdev_hw_addr *ha;
-       int i;
        u32 tmp, crc;
 
        /* Disable the receiver.  The bit self-clears when
@@ -1017,10 +1016,7 @@ static void bigmac_set_multicast(struct net_device *dev)
                tmp |= BIGMAC_RXCFG_PMISC;
                sbus_writel(tmp, bregs + BMAC_RXCFG);
        } else {
-               u16 hash_table[4];
-
-               for (i = 0; i < 4; i++)
-                       hash_table[i] = 0;
+               u16 hash_table[4] = { 0 };
 
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
index 5f3f9d52757dbffb60eb277d45acdc6a29eb49f6..e62df2b81302bd32881ca22daef127e04985679e 100644 (file)
@@ -3028,15 +3028,4 @@ static struct pci_driver gem_driver = {
 #endif /* CONFIG_PM */
 };
 
-static int __init gem_init(void)
-{
-       return pci_register_driver(&gem_driver);
-}
-
-static void __exit gem_cleanup(void)
-{
-       pci_unregister_driver(&gem_driver);
-}
-
-module_init(gem_init);
-module_exit(gem_cleanup);
+module_pci_driver(gem_driver);
index 436fa9d5a07190a555adc6a6b7119700845b2b73..171f5b0809c4d1659cd26d8ba7255a915c1d8f70 100644 (file)
@@ -2506,7 +2506,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
        struct quattro *qp;
 
        op = to_platform_device(parent);
-       qp = dev_get_drvdata(&op->dev);
+       qp = platform_get_drvdata(op);
        if (qp)
                return qp;
 
@@ -2521,7 +2521,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
                qp->next = qfe_sbus_list;
                qfe_sbus_list = qp;
 
-               dev_set_drvdata(&op->dev, qp);
+               platform_set_drvdata(op, qp);
        }
        return qp;
 }
index 8182591bc1876c77ddae935d6da64d9200a2dbcc..b072f4dba033c1661bf4a341747b1d82d4874fc7 100644 (file)
@@ -767,7 +767,7 @@ static struct sunqec *get_qec(struct platform_device *child)
        struct platform_device *op = to_platform_device(child->dev.parent);
        struct sunqec *qecp;
 
-       qecp = dev_get_drvdata(&op->dev);
+       qecp = platform_get_drvdata(op);
        if (!qecp) {
                qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
                if (qecp) {
@@ -801,7 +801,7 @@ static struct sunqec *get_qec(struct platform_device *child)
                                goto fail;
                        }
 
-                       dev_set_drvdata(&op->dev, qecp);
+                       platform_set_drvdata(op, qecp);
 
                        qecp->next_module = root_qec_dev;
                        root_qec_dev = qecp;
@@ -902,7 +902,7 @@ static int qec_ether_init(struct platform_device *op)
        if (res)
                goto fail;
 
-       dev_set_drvdata(&op->dev, qe);
+       platform_set_drvdata(op, qe);
 
        printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
               dev->dev_addr);
@@ -934,7 +934,7 @@ static int qec_sbus_probe(struct platform_device *op)
 
 static int qec_sbus_remove(struct platform_device *op)
 {
-       struct sunqe *qp = dev_get_drvdata(&op->dev);
+       struct sunqe *qp = platform_get_drvdata(op);
        struct net_device *net_dev = qp->dev;
 
        unregister_netdev(net_dev);
@@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op)
 
        free_netdev(net_dev);
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index 21a5b291b4b39d0ae65dcd8766c9e51a25665dee..101b037a7dcea6b74c13022578611838c49df421 100644 (file)
@@ -1554,6 +1554,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                if (mac_addr)
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
+               slave_data->phy_if = of_get_phy_mode(slave_node);
+
                if (data->dual_emac) {
                        if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
@@ -1679,7 +1681,7 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->rx_packet_max = max(rx_packet_max, 128);
        priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
        priv->irq_enabled = true;
-       if (!ndev) {
+       if (!priv->cpts) {
                pr_err("error allocating cpts\n");
                goto clean_ndev_ret;
        }
@@ -1940,7 +1942,6 @@ static int cpsw_remove(struct platform_device *pdev)
        struct cpsw_priv *priv = netdev_priv(ndev);
        int i;
 
-       platform_set_drvdata(pdev, NULL);
        if (priv->data.dual_emac)
                unregister_netdev(cpsw_get_slave_ndev(priv, 1));
        unregister_netdev(ndev);
index 49dfd592ac1ecd4e0a86d8d5ad01572a97e5c346..a377bc72774084b567642c7dab0e4e89922e4cf6 100644 (file)
@@ -64,6 +64,7 @@
 #define CPDMA_DESC_TO_PORT_EN  BIT(20)
 #define CPDMA_TO_PORT_SHIFT    16
 #define CPDMA_DESC_PORT_MASK   (BIT(18) | BIT(17) | BIT(16))
+#define CPDMA_DESC_CRC_LEN     4
 
 #define CPDMA_TEARDOWN_VALUE   0xfffffffc
 
@@ -798,6 +799,10 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
                status = -EBUSY;
                goto unlock_ret;
        }
+
+       if (status & CPDMA_DESC_PASS_CRC)
+               outlen -= CPDMA_DESC_CRC_LEN;
+
        status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                            CPDMA_DESC_PORT_MASK);
 
index 860e15ddfbcbcd7232da1c71779793442d72a1c1..f118d7133128a18005ff4fb96114873bd7f140bf 100644 (file)
@@ -1532,7 +1532,7 @@ static int emac_dev_open(struct net_device *ndev)
        struct device *emac_dev = &ndev->dev;
        u32 cnt;
        struct resource *res;
-       int q, m, ret;
+       int ret;
        int i = 0;
        int k = 0;
        struct emac_priv *priv = netdev_priv(ndev);
@@ -1567,8 +1567,9 @@ static int emac_dev_open(struct net_device *ndev)
 
        while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
-                       if (request_irq(i, emac_irq, IRQF_DISABLED,
-                                       ndev->name, ndev))
+                       if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
+                                            IRQF_DISABLED,
+                                            ndev->name, ndev))
                                goto rollback;
                }
                k++;
@@ -1641,15 +1642,7 @@ static int emac_dev_open(struct net_device *ndev)
 
 rollback:
 
-       dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
-
-       for (q = k; k >= 0; k--) {
-               for (m = i; m >= res->start; m--)
-                       free_irq(m, ndev);
-               res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
-               m = res->end;
-       }
-
+       dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
        ret = -EBUSY;
 err:
        pm_runtime_put(&priv->pdev->dev);
@@ -1667,9 +1660,6 @@ err:
  */
 static int emac_dev_stop(struct net_device *ndev)
 {
-       struct resource *res;
-       int i = 0;
-       int irq_num;
        struct emac_priv *priv = netdev_priv(ndev);
        struct device *emac_dev = &ndev->dev;
 
@@ -1685,13 +1675,6 @@ static int emac_dev_stop(struct net_device *ndev)
        if (priv->phydev)
                phy_disconnect(priv->phydev);
 
-       /* Free IRQ */
-       while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
-               for (irq_num = res->start; irq_num <= res->end; irq_num++)
-                       free_irq(irq_num, priv->ndev);
-               i++;
-       }
-
        if (netif_msg_drv(priv))
                dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
 
@@ -1771,29 +1754,22 @@ static const struct net_device_ops emac_netdev_ops = {
 #endif
 };
 
-#ifdef CONFIG_OF
-static struct emac_platform_data
-       *davinci_emac_of_get_pdata(struct platform_device *pdev,
-       struct emac_priv *priv)
+static struct emac_platform_data *
+davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 {
        struct device_node *np;
        struct emac_platform_data *pdata = NULL;
        const u8 *mac_addr;
-       u32 data;
-       int ret;
 
-       pdata = pdev->dev.platform_data;
-       if (!pdata) {
-               pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-               if (!pdata)
-                       goto nodata;
-       }
+       if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+               return pdev->dev.platform_data;
+
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
 
        np = pdev->dev.of_node;
-       if (!np)
-               goto nodata;
-       else
-               pdata->version = EMAC_VERSION_2;
+       pdata->version = EMAC_VERSION_2;
 
        if (!is_valid_ether_addr(pdata->mac_addr)) {
                mac_addr = of_get_mac_address(np);
@@ -1801,47 +1777,31 @@ static struct emac_platform_data
                        memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
        }
 
-       ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data);
-       if (!ret)
-               pdata->ctrl_reg_offset = data;
+       of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
+                            &pdata->ctrl_reg_offset);
 
-       ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
-               &data);
-       if (!ret)
-               pdata->ctrl_mod_reg_offset = data;
+       of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
+                            &pdata->ctrl_mod_reg_offset);
 
-       ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data);
-       if (!ret)
-               pdata->ctrl_ram_offset = data;
+       of_property_read_u32(np, "ti,davinci-ctrl-ram-offset",
+                            &pdata->ctrl_ram_offset);
 
-       ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data);
-       if (!ret)
-               pdata->ctrl_ram_size = data;
+       of_property_read_u32(np, "ti,davinci-ctrl-ram-size",
+                            &pdata->ctrl_ram_size);
 
-       ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data);
-       if (!ret)
-               pdata->rmii_en = data;
+       of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en);
 
-       ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data);
-       if (!ret)
-               pdata->no_bd_ram = data;
+       pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
 
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
        if (!priv->phy_node)
                pdata->phy_id = "";
 
        pdev->dev.platform_data = pdata;
-nodata:
+
        return  pdata;
 }
-#else
-static struct emac_platform_data
-       *davinci_emac_of_get_pdata(struct platform_device *pdev,
-       struct emac_priv *priv)
-{
-       return  pdev->dev.platform_data;
-}
-#endif
+
 /**
  * davinci_emac_probe - EMAC device probe
  * @pdev: The DaVinci EMAC device that we are removing
@@ -1856,7 +1816,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
        struct resource *res;
        struct net_device *ndev;
        struct emac_priv *priv;
-       unsigned long size, hw_ram_addr;
+       unsigned long hw_ram_addr;
        struct emac_platform_data *pdata;
        struct device *emac_dev;
        struct cpdma_params dma_params;
@@ -1907,25 +1867,11 @@ static int davinci_emac_probe(struct platform_device *pdev)
        emac_dev = &ndev->dev;
        /* Get EMAC platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev,"error getting res\n");
-               rc = -ENOENT;
-               goto no_pdata;
-       }
-
        priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
-       size = resource_size(res);
-       if (!devm_request_mem_region(&pdev->dev, res->start,
-                                    size, ndev->name)) {
-               dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
-               rc = -ENXIO;
-               goto no_pdata;
-       }
-
-       priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
-       if (!priv->remap_addr) {
+       priv->remap_addr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->remap_addr)) {
                dev_err(&pdev->dev, "unable to map IO\n");
-               rc = -ENOMEM;
+               rc = PTR_ERR(priv->remap_addr);
                goto no_pdata;
        }
        priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
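
Two simplifications drive this file's probe cleanups: of_property_read_u32() and friends only write the output when the property exists, so values can be read straight into a zeroed pdata without temporaries, and devm_ioremap_resource() folds the request_mem_region()/ioremap() pair into one call that reports failure as an ERR_PTR. A hypothetical probe fragment assuming those helpers, not the driver verbatim:

    static int example_probe(struct platform_device *pdev)
    {
            struct device_node *np = pdev->dev.of_node;
            struct resource *res;
            void __iomem *base;
            u32 ctrl_reg_offset = 0;    /* keeps its default if the property is absent */

            of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
                                 &ctrl_reg_offset);

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* region claim + ioremap in one step */

            return 0;
    }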
@@ -2037,8 +1983,6 @@ static int davinci_emac_remove(struct platform_device *pdev)
 
        dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
 
-       platform_set_drvdata(pdev, NULL);
-
        if (priv->txchan)
                cpdma_chan_destroy(priv->txchan);
        if (priv->rxchan)
@@ -2078,11 +2022,13 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
        .resume         = davinci_emac_resume,
 };
 
+#if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id davinci_emac_of_match[] = {
        {.compatible = "ti,davinci-dm6467-emac", },
        {},
 };
 MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
+#endif
 
 /* davinci_emac_driver: EMAC platform driver structure */
 static struct platform_driver davinci_emac_driver = {
index 12aec173564ca6fdc6b6e15ab8a8dcc0c293c9a4..dac6f5832d1800b084456dec2bd87041abfe2ef1 100644 (file)
@@ -291,6 +291,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_OF)
 static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
                         struct platform_device *pdev)
 {
@@ -308,7 +309,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
 
        return 0;
 }
-
+#endif
 
 static int davinci_mdio_probe(struct platform_device *pdev)
 {
@@ -449,10 +450,9 @@ static int davinci_mdio_suspend(struct device *dev)
        __raw_writel(ctrl, &data->regs->control);
        wait_for_idle(data);
 
-       pm_runtime_put_sync(data->dev);
-
        data->suspended = true;
        spin_unlock(&data->lock);
+       pm_runtime_put_sync(data->dev);
 
        return 0;
 }
@@ -460,15 +460,12 @@ static int davinci_mdio_suspend(struct device *dev)
 static int davinci_mdio_resume(struct device *dev)
 {
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
-       u32 ctrl;
 
-       spin_lock(&data->lock);
        pm_runtime_get_sync(data->dev);
 
+       spin_lock(&data->lock);
        /* restart the scan state machine */
-       ctrl = __raw_readl(&data->regs->control);
-       ctrl |= CONTROL_ENABLE;
-       __raw_writel(ctrl, &data->regs->control);
+       __davinci_mdio_reset(data);
 
        data->suspended = false;
        spin_unlock(&data->lock);
@@ -477,15 +474,17 @@ static int davinci_mdio_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-       .suspend        = davinci_mdio_suspend,
-       .resume         = davinci_mdio_resume,
+       .suspend_late   = davinci_mdio_suspend,
+       .resume_early   = davinci_mdio_resume,
 };
 
+#if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id davinci_mdio_of_mtable[] = {
        { .compatible = "ti,davinci_mdio", },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
 
 static struct platform_driver davinci_mdio_driver = {
        .driver = {
index 60c400f6d01ff540b6a2e132c3520972336adb1a..59abfbcd0d551e02cb25fae12216720a154f4bda 100644 (file)
@@ -533,7 +533,6 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
                /* This is a hack. We need to know which board structure
                 * is suited for this adapter */
                device_id = inw(ioaddr + EISA_ID2);
-               priv->is_eisa = 1;
                if (device_id == 0x20F1) {
                        priv->adapter = &board_info[13]; /* NetFlex-3/E */
                        priv->adapter_rev = 23;         /* TLAN 2.3 */
index 5fc98a8e488900c07757b59c7464b5e0a54b7855..2eb33a250788abca1235d3ee93d809b422eb328d 100644 (file)
@@ -207,7 +207,6 @@ struct tlan_priv {
        u8                      tlan_full_duplex;
        spinlock_t              lock;
        u8                      link;
-       u8                      is_eisa;
        struct work_struct                      tlan_tqueue;
        u8                      neg_be_verbose;
 };
index fe256094db35d8c745873a29851e5622746b999a..a971b9cca564c910f8e928806ea9ac443ed22cf2 100644 (file)
@@ -2209,18 +2209,6 @@ MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
 module_param_named(duplex, options.duplex, int, 0);
 MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
 
-static int __init tc35815_init_module(void)
-{
-       return pci_register_driver(&tc35815_pci_driver);
-}
-
-static void __exit tc35815_cleanup_module(void)
-{
-       pci_unregister_driver(&tc35815_pci_driver);
-}
-
-module_init(tc35815_init_module);
-module_exit(tc35815_cleanup_module);
-
+module_pci_driver(tc35815_pci_driver);
 MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
 MODULE_LICENSE("GPL");
index 3c69a04608324c8c16bc33cee78f03f524329018..01bdc6ca0755feff940d8bede196ba83d85c2f06 100644 (file)
@@ -1682,7 +1682,6 @@ static int tsi108_ether_remove(struct platform_device *pdev)
 
        unregister_netdev(dev);
        tsi108_stop_ethernet(dev);
-       platform_set_drvdata(pdev, NULL);
        iounmap(priv->regs);
        iounmap(priv->phyregs);
        free_netdev(dev);
index 68a9ba66feba866f2da80d39f34eed63fc8082ca..8a049a2b44742aabe24221b12a6bcb2a4caf357f 100644 (file)
@@ -5,7 +5,6 @@
 config NET_VENDOR_VIA
        bool "VIA devices"
        default y
-       depends on PCI
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -22,7 +21,6 @@ config VIA_RHINE
        tristate "VIA Rhine support"
        depends on PCI
        select CRC32
-       select NET_CORE
        select MII
        ---help---
          If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
@@ -45,10 +43,9 @@ config VIA_RHINE_MMIO
 
 config VIA_VELOCITY
        tristate "VIA Velocity support"
-       depends on PCI
+       depends on (PCI || USE_OF)
        select CRC32
        select CRC_CCITT
-       select NET_CORE
        select MII
        ---help---
          If you have a VIA "Velocity" based network card say Y here.
index fb6248956ee26ec55c8c4cbcd853fc721c74e1d7..76919948b4ee32bd4e79a9ddce25e962e6437add 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/if.h>
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/inetdevice.h>
+#include <linux/platform_device.h>
 #include <linux/reboot.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 
 #include "via-velocity.h"
 
+enum velocity_bus_type {
+       BUS_PCI,
+       BUS_PLATFORM,
+};
 
 static int velocity_nics;
 static int msglevel = MSG_LEVEL_INFO;
 
+static void velocity_set_power_state(struct velocity_info *vptr, char state)
+{
+       void *addr = vptr->mac_regs;
+
+       if (vptr->pdev)
+               pci_set_power_state(vptr->pdev, state);
+       else
+               writeb(state, addr + 0x154);
+}
+
 /**
  *     mac_get_cam_mask        -       Read a CAM mask
  *     @regs: register block for this velocity
@@ -361,12 +380,23 @@ static struct velocity_info_tbl chip_info_table[] = {
  *     Describe the PCI device identifiers that we support in this
  *     device driver. Used for hotplug autoloading.
  */
-static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
+
+static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
 };
 
-MODULE_DEVICE_TABLE(pci, velocity_id_table);
+MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
+
+/**
+ *     Describe the OF device identifiers that we support in this
+ *     device driver. Used for devicetree nodes.
+ */
+static struct of_device_id velocity_of_ids[] = {
+       { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
+       { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, velocity_of_ids);
 
 /**
  *     get_chip_name   -       identifier to name
@@ -384,29 +414,6 @@ static const char *get_chip_name(enum chip_type chip_id)
        return chip_info_table[i].name;
 }
 
-/**
- *     velocity_remove1        -       device unplug
- *     @pdev: PCI device being removed
- *
- *     Device unload callback. Called on an unplug or on module
- *     unload for each active device that is present. Disconnects
- *     the device from the network layer and frees all the resources
- */
-static void velocity_remove1(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct velocity_info *vptr = netdev_priv(dev);
-
-       unregister_netdev(dev);
-       iounmap(vptr->mac_regs);
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-       free_netdev(dev);
-
-       velocity_nics--;
-}
-
 /**
  *     velocity_set_int_opt    -       parser for integer options
  *     @opt: pointer to option value
@@ -998,9 +1005,9 @@ static void velocity_print_link_status(struct velocity_info *vptr)
 {
 
        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
-               VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
+               VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
-               VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
+               VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name);
 
                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
@@ -1014,7 +1021,7 @@ static void velocity_print_link_status(struct velocity_info *vptr)
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
-               VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
+               VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
@@ -1180,6 +1187,17 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
        u16 BMCR;
 
        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
+       case PHYID_ICPLUS_IP101A:
+               MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
+                                               MII_ADVERTISE, vptr->mac_regs);
+               if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+                       MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
+                                                               vptr->mac_regs);
+               else
+                       MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
+                                                               vptr->mac_regs);
+               MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
+               break;
        case PHYID_CICADA_CS8201:
                /*
                 *      Reset to hardware default
@@ -1311,6 +1329,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
 {
        struct mac_regs __iomem *regs = vptr->mac_regs;
+       struct net_device *netdev = vptr->netdev;
        int i, mii_status;
 
        mac_wol_reset(regs);
@@ -1319,7 +1338,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:
 
-               netif_stop_queue(vptr->dev);
+               netif_stop_queue(netdev);
 
                /*
                 *      Reset RX to prevent RX pointer not on the 4X location
@@ -1332,7 +1351,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
-                               netif_wake_queue(vptr->dev);
+                               netif_wake_queue(netdev);
                }
 
                enable_flow_control_ability(vptr);
@@ -1352,9 +1371,11 @@ static void velocity_init_registers(struct velocity_info *vptr,
                velocity_soft_reset(vptr);
                mdelay(5);
 
-               mac_eeprom_reload(regs);
-               for (i = 0; i < 6; i++)
-                       writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
+               if (!vptr->no_eeprom) {
+                       mac_eeprom_reload(regs);
+                       for (i = 0; i < 6; i++)
+                               writeb(netdev->dev_addr[i], regs->PAR + i);
+               }
 
                /*
                 *      clear Pre_ACPI bit.
@@ -1377,7 +1398,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
                /*
                 *      Set packet filter: Receive directed and broadcast address
                 */
-               velocity_set_multi(vptr->dev);
+               velocity_set_multi(netdev);
 
                /*
                 *      Enable MII auto-polling
@@ -1404,14 +1425,14 @@ static void velocity_init_registers(struct velocity_info *vptr,
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
 
                mii_status = velocity_get_opt_media_mode(vptr);
-               netif_stop_queue(vptr->dev);
+               netif_stop_queue(netdev);
 
                mii_init(vptr, mii_status);
 
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
-                               netif_wake_queue(vptr->dev);
+                               netif_wake_queue(netdev);
                }
 
                enable_flow_control_ability(vptr);
@@ -1459,7 +1480,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
        struct velocity_opt *opt = &vptr->options;
        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
-       struct pci_dev *pdev = vptr->pdev;
        dma_addr_t pool_dma;
        void *pool;
        unsigned int i;
@@ -1467,14 +1487,14 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
        /*
         * Allocate all RD/TD rings a single pool.
         *
-        * pci_alloc_consistent() fulfills the requirement for 64 bytes
+        * dma_alloc_coherent() fulfills the requirement for 64 bytes
         * alignment
         */
-       pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
-                                   rx_ring_size, &pool_dma);
+       pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
+                                   rx_ring_size, &pool_dma, GFP_ATOMIC);
        if (!pool) {
-               dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
-                       vptr->dev->name);
+               dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
+                       vptr->netdev->name);
                return -ENOMEM;
        }
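
The rest of this file follows the same substitution from the PCI DMA wrappers to the generic DMA API, so the plain device pointer (vptr->dev) works for both the PCI and the new platform/OF case. Rough correspondences as used in the hunks below; the old wrappers were themselves defined in terms of the new calls:

    /* pci_alloc_consistent(pdev, size, &h)             -> dma_alloc_coherent(dev, size, &h, GFP_ATOMIC)
     * pci_free_consistent(pdev, size, p, h)            -> dma_free_coherent(dev, size, p, h)
     * pci_map_single(pdev, p, len, PCI_DMA_FROMDEVICE) -> dma_map_single(dev, p, len, DMA_FROM_DEVICE)
     * pci_unmap_single(pdev, h, len, PCI_DMA_TODEVICE) -> dma_unmap_single(dev, h, len, DMA_TO_DEVICE)
     * pci_dma_sync_single_for_cpu/_for_device          -> dma_sync_single_for_cpu/_for_device
     */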
 
@@ -1514,7 +1534,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
        struct rx_desc *rd = &(vptr->rx.ring[idx]);
        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
-       rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
+       rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
        if (rd_info->skb == NULL)
                return -ENOMEM;
 
@@ -1524,8 +1544,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
         */
        skb_reserve(rd_info->skb,
                        64 - ((unsigned long) rd_info->skb->data & 63));
-       rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
-                                       vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+       rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
+                                       vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
        /*
         *      Fill in the descriptor to match
@@ -1588,8 +1608,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
                if (!rd_info->skb)
                        continue;
-               pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-                                PCI_DMA_FROMDEVICE);
+               dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+                                DMA_FROM_DEVICE);
                rd_info->skb_dma = 0;
 
                dev_kfree_skb(rd_info->skb);
@@ -1620,7 +1640,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
 
        if (velocity_rx_refill(vptr) != vptr->options.numrx) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
-                       "%s: failed to allocate RX buffer.\n", vptr->dev->name);
+                       "%s: failed to allocate RX buffer.\n", vptr->netdev->name);
                velocity_free_rd_ring(vptr);
                goto out;
        }
@@ -1670,7 +1690,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
        const int size = vptr->options.numrx * sizeof(struct rx_desc) +
                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
-       pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+       dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
@@ -1727,8 +1747,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
                                pktlen = max_t(size_t, pktlen,
                                                td->td_buf[i].size & ~TD_QUEUE);
 
-                       pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
-                                       le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
+                       dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+                                       le16_to_cpu(pktlen), DMA_TO_DEVICE);
                }
        }
        dev_kfree_skb_irq(skb);
@@ -1750,8 +1770,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
        if (td_info->skb) {
                for (i = 0; i < td_info->nskb_dma; i++) {
                        if (td_info->skb_dma[i]) {
-                               pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
-                                       td_info->skb->len, PCI_DMA_TODEVICE);
+                               dma_unmap_single(vptr->dev, td_info->skb_dma[i],
+                                       td_info->skb->len, DMA_TO_DEVICE);
                                td_info->skb_dma[i] = 0;
                        }
                }
@@ -1809,7 +1829,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
                printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
                BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
                writew(TRDCSR_RUN, &regs->TDCSRClr);
-               netif_stop_queue(vptr->dev);
+               netif_stop_queue(vptr->netdev);
 
                /* FIXME: port over the pci_device_failed code and use it
                   here */
@@ -1850,10 +1870,10 @@ static void velocity_error(struct velocity_info *vptr, int status)
 
                if (linked) {
                        vptr->mii_status &= ~VELOCITY_LINK_FAIL;
-                       netif_carrier_on(vptr->dev);
+                       netif_carrier_on(vptr->netdev);
                } else {
                        vptr->mii_status |= VELOCITY_LINK_FAIL;
-                       netif_carrier_off(vptr->dev);
+                       netif_carrier_off(vptr->netdev);
                }
 
                velocity_print_link_status(vptr);
@@ -1867,9 +1887,9 @@ static void velocity_error(struct velocity_info *vptr, int status)
                enable_mii_autopoll(regs);
 
                if (vptr->mii_status & VELOCITY_LINK_FAIL)
-                       netif_stop_queue(vptr->dev);
+                       netif_stop_queue(vptr->netdev);
                else
-                       netif_wake_queue(vptr->dev);
+                       netif_wake_queue(vptr->netdev);
 
        }
        if (status & ISR_MIBFI)
@@ -1894,7 +1914,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
        int idx;
        int works = 0;
        struct velocity_td_info *tdinfo;
-       struct net_device_stats *stats = &vptr->dev->stats;
+       struct net_device_stats *stats = &vptr->netdev->stats;
 
        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
@@ -1939,9 +1959,9 @@ static int velocity_tx_srv(struct velocity_info *vptr)
         *      Look to see if we should kick the transmit network
         *      layer for more work.
         */
-       if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+       if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
            (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
-               netif_wake_queue(vptr->dev);
+               netif_wake_queue(vptr->netdev);
        }
        return works;
 }
@@ -1989,7 +2009,7 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
        if (pkt_size < rx_copybreak) {
                struct sk_buff *new_skb;
 
-               new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
+               new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
                if (new_skb) {
                        new_skb->ip_summed = rx_skb[0]->ip_summed;
                        skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
@@ -2029,15 +2049,14 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
  */
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
-       void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
-       struct net_device_stats *stats = &vptr->dev->stats;
+       struct net_device_stats *stats = &vptr->netdev->stats;
        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
        struct rx_desc *rd = &(vptr->rx.ring[idx]);
        int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
        struct sk_buff *skb;
 
        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
-               VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name);
+               VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name);
                stats->rx_length_errors++;
                return -EINVAL;
        }
@@ -2047,8 +2066,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
        skb = rd_info->skb;
 
-       pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-                                   vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
+                                   vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
        /*
         *      Drop frame not meeting IEEE 802.3
@@ -2061,21 +2080,20 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
                }
        }
 
-       pci_action = pci_dma_sync_single_for_device;
-
        velocity_rx_csum(rd, skb);
 
        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
                velocity_iph_realign(vptr, skb, pkt_len);
-               pci_action = pci_unmap_single;
                rd_info->skb = NULL;
+               dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+                                DMA_FROM_DEVICE);
+       } else {
+               dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
+                                          vptr->rx.buf_sz, DMA_FROM_DEVICE);
        }
 
-       pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-                  PCI_DMA_FROMDEVICE);
-
        skb_put(skb, pkt_len - 4);
-       skb->protocol = eth_type_trans(skb, vptr->dev);
+       skb->protocol = eth_type_trans(skb, vptr->netdev);
 
        if (rd->rdesc0.RSR & RSR_DETAG) {
                u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
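
The hunks above replace the legacy pci_dma_* wrappers with the generic DMA API called on the underlying struct device. A minimal sketch of the correspondence, assuming vptr->dev points at &pdev->dev in the PCI case (the helper names here are illustrative, only the dma_* calls are real kernel API):

        #include <linux/dma-mapping.h>

        static dma_addr_t map_rx_buffer(struct device *dev, void *buf, size_t len)
        {
                /* was: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE) */
                return dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        }

        static void complete_rx(struct device *dev, dma_addr_t handle, size_t len,
                                bool keep_mapping)
        {
                if (keep_mapping)
                        /* hand the buffer back to the device after the CPU looked at it */
                        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
                else
                        /* was: pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE) */
                        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        }
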
@@ -2100,7 +2118,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
  */
 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
 {
-       struct net_device_stats *stats = &vptr->dev->stats;
+       struct net_device_stats *stats = &vptr->netdev->stats;
        int rd_curr = vptr->rx.curr;
        int works = 0;
 
@@ -2235,15 +2253,15 @@ static int velocity_open(struct net_device *dev)
                goto out;
 
        /* Ensure chip is running */
-       pci_set_power_state(vptr->pdev, PCI_D0);
+       velocity_set_power_state(vptr, PCI_D0);
 
        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
-       ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
+       ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
                          dev->name, dev);
        if (ret < 0) {
                /* Power down the chip */
-               pci_set_power_state(vptr->pdev, PCI_D3hot);
+               velocity_set_power_state(vptr, PCI_D3hot);
                velocity_free_rings(vptr);
                goto out;
        }
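
velocity_set_power_state() is introduced by this commit but its body is outside the hunks shown here; presumably it hides the PCI-vs-platform difference from callers such as velocity_open(). A hedged sketch of that idea (the platform branch below is an assumption, not the commit's actual code):

        static void velocity_set_power_state(struct velocity_info *vptr,
                                             pci_power_t state)
        {
                /* On PCI the power-state change goes through the PCI core. */
                if (vptr->pdev)
                        pci_set_power_state(vptr->pdev, state);
                /* A platform-only device has no core helper for this, so it
                 * would have to program its own power-control register here
                 * (details omitted; see the full driver for the real code). */
        }
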
@@ -2292,7 +2310,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
        if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
-                               vptr->dev->name);
+                               vptr->netdev->name);
                ret = -EINVAL;
                goto out_0;
        }
@@ -2314,8 +2332,9 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
                        goto out_0;
                }
 
-               tmp_vptr->dev = dev;
+               tmp_vptr->netdev = dev;
                tmp_vptr->pdev = vptr->pdev;
+               tmp_vptr->dev = vptr->dev;
                tmp_vptr->options = vptr->options;
                tmp_vptr->tx.numq = vptr->tx.numq;
 
@@ -2415,7 +2434,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
           saving then we need to bring the device back up to talk to it */
 
        if (!netif_running(dev))
-               pci_set_power_state(vptr->pdev, PCI_D0);
+               velocity_set_power_state(vptr, PCI_D0);
 
        switch (cmd) {
        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
@@ -2428,7 +2447,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                ret = -EOPNOTSUPP;
        }
        if (!netif_running(dev))
-               pci_set_power_state(vptr->pdev, PCI_D3hot);
+               velocity_set_power_state(vptr, PCI_D3hot);
 
 
        return ret;
@@ -2494,7 +2513,7 @@ static int velocity_close(struct net_device *dev)
        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
                velocity_get_ip(vptr);
 
-       free_irq(vptr->pdev->irq, dev);
+       free_irq(dev->irq, dev);
 
        velocity_free_rings(vptr);
 
@@ -2550,7 +2569,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
         *      add it to the transmit ring.
         */
        tdinfo->skb = skb;
-       tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+       tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
+                                                               DMA_TO_DEVICE);
        td_ptr->tdesc0.len = cpu_to_le16(pktlen);
        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
        td_ptr->td_buf[0].pa_high = 0;
@@ -2560,7 +2580,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
+               tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
                                                          frag, 0,
                                                          skb_frag_size(frag),
                                                          DMA_TO_DEVICE);
@@ -2632,12 +2652,9 @@ static const struct net_device_ops velocity_netdev_ops = {
  *     Set up the initial velocity_info struct for the device that has been
  *     discovered.
  */
-static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
-                              const struct velocity_info_tbl *info)
+static void velocity_init_info(struct velocity_info *vptr,
+                               const struct velocity_info_tbl *info)
 {
-       memset(vptr, 0, sizeof(struct velocity_info));
-
-       vptr->pdev = pdev;
        vptr->chip_id = info->chip_id;
        vptr->tx.numq = info->txqueue;
        vptr->multicast_limit = MCAM_SIZE;
@@ -2652,10 +2669,9 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
  *     Retrieve the PCI configuration space data that interests us from
  *     the kernel PCI layer
  */
-static int velocity_get_pci_info(struct velocity_info *vptr,
-                                struct pci_dev *pdev)
+static int velocity_get_pci_info(struct velocity_info *vptr)
 {
-       vptr->rev_id = pdev->revision;
+       struct pci_dev *pdev = vptr->pdev;
 
        pci_set_master(pdev);
 
@@ -2678,7 +2694,37 @@ static int velocity_get_pci_info(struct velocity_info *vptr,
                dev_err(&pdev->dev, "region #1 is too small.\n");
                return -EINVAL;
        }
-       vptr->pdev = pdev;
+
+       return 0;
+}
+
+/**
+ *     velocity_get_platform_info - retrieve platform info for device
+ *     @vptr: velocity device
+ *     @pdev: platform device it matches
+ *
+ *     Retrieve the Platform configuration data that interests us
+ */
+static int velocity_get_platform_info(struct velocity_info *vptr)
+{
+       struct resource res;
+       int ret;
+
+       if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
+               vptr->no_eeprom = 1;
+
+       ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
+       if (ret) {
+               dev_err(vptr->dev, "unable to find memory address\n");
+               return ret;
+       }
+
+       vptr->memaddr = res.start;
+
+       if (resource_size(&res) < VELOCITY_IO_SIZE) {
+               dev_err(vptr->dev, "memory region is too small.\n");
+               return -EINVAL;
+       }
 
        return 0;
 }
@@ -2692,7 +2738,7 @@ static int velocity_get_pci_info(struct velocity_info *vptr,
  */
 static void velocity_print_info(struct velocity_info *vptr)
 {
-       struct net_device *dev = vptr->dev;
+       struct net_device *dev = vptr->netdev;
 
        printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
        printk(KERN_INFO "%s: Ethernet Address: %pM\n",
@@ -2707,21 +2753,22 @@ static u32 velocity_get_link(struct net_device *dev)
 }
 
 /**
- *     velocity_found1         -       set up discovered velocity card
+ *     velocity_probe - set up discovered velocity device
  *     @pdev: PCI device
  *     @ent: PCI device table entry that matched
+ *     @bustype: bus that device is connected to
  *
  *     Configure a discovered adapter from scratch. Return a negative
  *     errno error code on failure paths.
  */
-static int velocity_found1(struct pci_dev *pdev,
-                          const struct pci_device_id *ent)
+static int velocity_probe(struct device *dev, int irq,
+                          const struct velocity_info_tbl *info,
+                          enum velocity_bus_type bustype)
 {
        static int first = 1;
-       struct net_device *dev;
+       struct net_device *netdev;
        int i;
        const char *drv_string;
-       const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
        struct velocity_info *vptr;
        struct mac_regs __iomem *regs;
        int ret = -ENOMEM;
@@ -2730,20 +2777,18 @@ static int velocity_found1(struct pci_dev *pdev,
         * can support more than MAX_UNITS.
         */
        if (velocity_nics >= MAX_UNITS) {
-               dev_notice(&pdev->dev, "already found %d NICs.\n",
-                          velocity_nics);
+               dev_notice(dev, "already found %d NICs.\n", velocity_nics);
                return -ENODEV;
        }
 
-       dev = alloc_etherdev(sizeof(struct velocity_info));
-       if (!dev)
+       netdev = alloc_etherdev(sizeof(struct velocity_info));
+       if (!netdev)
                goto out;
 
        /* Chain it all together */
 
-       SET_NETDEV_DEV(dev, &pdev->dev);
-       vptr = netdev_priv(dev);
-
+       SET_NETDEV_DEV(netdev, dev);
+       vptr = netdev_priv(netdev);
 
        if (first) {
                printk(KERN_INFO "%s Ver. %s\n",
@@ -2753,41 +2798,41 @@ static int velocity_found1(struct pci_dev *pdev,
                first = 0;
        }
 
-       velocity_init_info(pdev, vptr, info);
-
+       netdev->irq = irq;
+       vptr->netdev = netdev;
        vptr->dev = dev;
 
-       ret = pci_enable_device(pdev);
-       if (ret < 0)
-               goto err_free_dev;
+       velocity_init_info(vptr, info);
 
-       ret = velocity_get_pci_info(vptr, pdev);
-       if (ret < 0) {
-               /* error message already printed */
-               goto err_disable;
-       }
+       if (bustype == BUS_PCI) {
+               vptr->pdev = to_pci_dev(dev);
 
-       ret = pci_request_regions(pdev, VELOCITY_NAME);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "No PCI resources.\n");
-               goto err_disable;
+               ret = velocity_get_pci_info(vptr);
+               if (ret < 0)
+                       goto err_free_dev;
+       } else {
+               vptr->pdev = NULL;
+               ret = velocity_get_platform_info(vptr);
+               if (ret < 0)
+                       goto err_free_dev;
        }
 
        regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
        if (regs == NULL) {
                ret = -EIO;
-               goto err_release_res;
+               goto err_free_dev;
        }
 
        vptr->mac_regs = regs;
+       vptr->rev_id = readb(&regs->rev_id);
 
        mac_wol_reset(regs);
 
        for (i = 0; i < 6; i++)
-               dev->dev_addr[i] = readb(&regs->PAR[i]);
+               netdev->dev_addr[i] = readb(&regs->PAR[i]);
 
 
-       drv_string = dev_driver_string(&pdev->dev);
+       drv_string = dev_driver_string(dev);
 
        velocity_get_options(&vptr->options, velocity_nics, drv_string);
 
@@ -2808,46 +2853,125 @@ static int velocity_found1(struct pci_dev *pdev,
 
        vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
 
-       dev->netdev_ops = &velocity_netdev_ops;
-       dev->ethtool_ops = &velocity_ethtool_ops;
-       netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
+       netdev->netdev_ops = &velocity_netdev_ops;
+       netdev->ethtool_ops = &velocity_ethtool_ops;
+       netif_napi_add(netdev, &vptr->napi, velocity_poll,
+                                                       VELOCITY_NAPI_WEIGHT);
 
-       dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+       netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                           NETIF_F_HW_VLAN_CTAG_TX;
-       dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER |
-                        NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM;
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+                       NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
+                       NETIF_F_IP_CSUM;
 
-       ret = register_netdev(dev);
+       ret = register_netdev(netdev);
        if (ret < 0)
                goto err_iounmap;
 
-       if (!velocity_get_link(dev)) {
-               netif_carrier_off(dev);
+       if (!velocity_get_link(netdev)) {
+               netif_carrier_off(netdev);
                vptr->mii_status |= VELOCITY_LINK_FAIL;
        }
 
        velocity_print_info(vptr);
-       pci_set_drvdata(pdev, dev);
+       dev_set_drvdata(vptr->dev, netdev);
 
        /* and leave the chip powered down */
 
-       pci_set_power_state(pdev, PCI_D3hot);
+       velocity_set_power_state(vptr, PCI_D3hot);
        velocity_nics++;
 out:
        return ret;
 
 err_iounmap:
        iounmap(regs);
-err_release_res:
-       pci_release_regions(pdev);
-err_disable:
-       pci_disable_device(pdev);
 err_free_dev:
-       free_netdev(dev);
+       free_netdev(netdev);
        goto out;
 }
 
-#ifdef CONFIG_PM
+/**
+ *     velocity_remove - device unplug
+ *     @dev: device being removed
+ *
+ *     Device unload callback. Called on an unplug or on module
+ *     unload for each active device that is present. Disconnects
+ *     the device from the network layer and frees all the resources
+ */
+static int velocity_remove(struct device *dev)
+{
+       struct net_device *netdev = dev_get_drvdata(dev);
+       struct velocity_info *vptr = netdev_priv(netdev);
+
+       unregister_netdev(netdev);
+       iounmap(vptr->mac_regs);
+       free_netdev(netdev);
+       velocity_nics--;
+
+       return 0;
+}
+
+static int velocity_pci_probe(struct pci_dev *pdev,
+                              const struct pci_device_id *ent)
+{
+       const struct velocity_info_tbl *info =
+                                       &chip_info_table[ent->driver_data];
+       int ret;
+
+       ret = pci_enable_device(pdev);
+       if (ret < 0)
+               return ret;
+
+       ret = pci_request_regions(pdev, VELOCITY_NAME);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "No PCI resources.\n");
+               goto fail1;
+       }
+
+       ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
+       if (ret == 0)
+               return 0;
+
+       pci_release_regions(pdev);
+fail1:
+       pci_disable_device(pdev);
+       return ret;
+}
+
+static void velocity_pci_remove(struct pci_dev *pdev)
+{
+       velocity_remove(&pdev->dev);
+
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+static int velocity_platform_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *of_id;
+       const struct velocity_info_tbl *info;
+       int irq;
+
+       of_id = of_match_device(velocity_of_ids, &pdev->dev);
+       if (!of_id)
+               return -EINVAL;
+       info = of_id->data;
+
+       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (!irq)
+               return -EINVAL;
+
+       return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
+}
+
+static int velocity_platform_remove(struct platform_device *pdev)
+{
+       velocity_remove(&pdev->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
 /**
  *     wol_calc_crc            -       WOL CRC
  *     @pattern: data pattern
@@ -3004,32 +3128,35 @@ static void velocity_save_context(struct velocity_info *vptr, struct velocity_co
 
 }
 
-static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
+static int velocity_suspend(struct device *dev)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct velocity_info *vptr = netdev_priv(dev);
+       struct net_device *netdev = dev_get_drvdata(dev);
+       struct velocity_info *vptr = netdev_priv(netdev);
        unsigned long flags;
 
-       if (!netif_running(vptr->dev))
+       if (!netif_running(vptr->netdev))
                return 0;
 
-       netif_device_detach(vptr->dev);
+       netif_device_detach(vptr->netdev);
 
        spin_lock_irqsave(&vptr->lock, flags);
-       pci_save_state(pdev);
+       if (vptr->pdev)
+               pci_save_state(vptr->pdev);
 
        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
                velocity_get_ip(vptr);
                velocity_save_context(vptr, &vptr->context);
                velocity_shutdown(vptr);
                velocity_set_wol(vptr);
-               pci_enable_wake(pdev, PCI_D3hot, 1);
-               pci_set_power_state(pdev, PCI_D3hot);
+               if (vptr->pdev)
+                       pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
+               velocity_set_power_state(vptr, PCI_D3hot);
        } else {
                velocity_save_context(vptr, &vptr->context);
                velocity_shutdown(vptr);
-               pci_disable_device(pdev);
-               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+               if (vptr->pdev)
+                       pci_disable_device(vptr->pdev);
+               velocity_set_power_state(vptr, PCI_D3hot);
        }
 
        spin_unlock_irqrestore(&vptr->lock, flags);
@@ -3071,19 +3198,22 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity
                writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
 }
 
-static int velocity_resume(struct pci_dev *pdev)
+static int velocity_resume(struct device *dev)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct velocity_info *vptr = netdev_priv(dev);
+       struct net_device *netdev = dev_get_drvdata(dev);
+       struct velocity_info *vptr = netdev_priv(netdev);
        unsigned long flags;
        int i;
 
-       if (!netif_running(vptr->dev))
+       if (!netif_running(vptr->netdev))
                return 0;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_enable_wake(pdev, 0, 0);
-       pci_restore_state(pdev);
+       velocity_set_power_state(vptr, PCI_D0);
+
+       if (vptr->pdev) {
+               pci_enable_wake(vptr->pdev, 0, 0);
+               pci_restore_state(vptr->pdev);
+       }
 
        mac_wol_reset(vptr->mac_regs);
 
@@ -3101,27 +3231,38 @@ static int velocity_resume(struct pci_dev *pdev)
 
        mac_enable_int(vptr->mac_regs);
        spin_unlock_irqrestore(&vptr->lock, flags);
-       netif_device_attach(vptr->dev);
+       netif_device_attach(vptr->netdev);
 
        return 0;
 }
-#endif
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
 
 /*
  *     Definition for our device driver. The PCI layer interface
  *     uses this to handle all our card discover and plugging
  */
-static struct pci_driver velocity_driver = {
+static struct pci_driver velocity_pci_driver = {
        .name           = VELOCITY_NAME,
-       .id_table       = velocity_id_table,
-       .probe          = velocity_found1,
-       .remove         = velocity_remove1,
-#ifdef CONFIG_PM
-       .suspend        = velocity_suspend,
-       .resume         = velocity_resume,
-#endif
+       .id_table       = velocity_pci_id_table,
+       .probe          = velocity_pci_probe,
+       .remove         = velocity_pci_remove,
+       .driver = {
+               .pm = &velocity_pm_ops,
+       },
 };
 
+static struct platform_driver velocity_platform_driver = {
+       .probe          = velocity_platform_probe,
+       .remove         = velocity_platform_remove,
+       .driver = {
+               .name = "via-velocity",
+               .owner = THIS_MODULE,
+               .of_match_table = velocity_of_ids,
+               .pm = &velocity_pm_ops,
+       },
+};
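
With dev_pm_ops the same suspend/resume pair can be attached to both the PCI and the platform driver, which is why the legacy .suspend/.resume members disappear above. SIMPLE_DEV_PM_OPS() is the stock macro for that; roughly (simplified, the real macro also handles the !CONFIG_PM_SLEEP case), it provides:

        /* Approximately what SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend,
         * velocity_resume) defines: */
        static const struct dev_pm_ops velocity_pm_ops_sketch = {
                .suspend  = velocity_suspend,
                .resume   = velocity_resume,
                .freeze   = velocity_suspend,
                .thaw     = velocity_resume,
                .poweroff = velocity_suspend,
                .restore  = velocity_resume,
        };
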
 
 /**
  *     velocity_ethtool_up     -       pre hook for ethtool
@@ -3134,7 +3275,7 @@ static int velocity_ethtool_up(struct net_device *dev)
 {
        struct velocity_info *vptr = netdev_priv(dev);
        if (!netif_running(dev))
-               pci_set_power_state(vptr->pdev, PCI_D0);
+               velocity_set_power_state(vptr, PCI_D0);
        return 0;
 }
 
@@ -3149,7 +3290,7 @@ static void velocity_ethtool_down(struct net_device *dev)
 {
        struct velocity_info *vptr = netdev_priv(dev);
        if (!netif_running(dev))
-               pci_set_power_state(vptr->pdev, PCI_D3hot);
+               velocity_set_power_state(vptr, PCI_D3hot);
 }
 
 static int velocity_get_settings(struct net_device *dev,
@@ -3269,9 +3410,14 @@ static int velocity_set_settings(struct net_device *dev,
 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct velocity_info *vptr = netdev_priv(dev);
+
        strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
        strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
-       strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
+       if (vptr->pdev)
+               strlcpy(info->bus_info, pci_name(vptr->pdev),
+                                               sizeof(info->bus_info));
+       else
+               strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
 }
 
 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3561,13 +3707,20 @@ static void velocity_unregister_notifier(void)
  */
 static int __init velocity_init_module(void)
 {
-       int ret;
+       int ret_pci, ret_platform;
 
        velocity_register_notifier();
-       ret = pci_register_driver(&velocity_driver);
-       if (ret < 0)
+
+       ret_pci = pci_register_driver(&velocity_pci_driver);
+       ret_platform = platform_driver_register(&velocity_platform_driver);
+
+       /* if both registrations failed, remove the notifier */
+       if ((ret_pci < 0) && (ret_platform < 0)) {
                velocity_unregister_notifier();
-       return ret;
+               return ret_pci;
+       }
+
+       return 0;
 }
 
 /**
@@ -3581,7 +3734,9 @@ static int __init velocity_init_module(void)
 static void __exit velocity_cleanup_module(void)
 {
        velocity_unregister_notifier();
-       pci_unregister_driver(&velocity_driver);
+
+       pci_unregister_driver(&velocity_pci_driver);
+       platform_driver_unregister(&velocity_platform_driver);
 }
 
 module_init(velocity_init_module);
index 4cb9f13485e957c2a510976841ac07023e774457..9453bfa9324a54fd41c86cabf3ca95c3536ed790 100644 (file)
@@ -1265,7 +1265,7 @@ struct velocity_context {
 #define PHYID_VT3216_64BIT  0x000FC600UL
 #define PHYID_MARVELL_1000  0x01410C50UL
 #define PHYID_MARVELL_1000S 0x01410C40UL
-
+#define PHYID_ICPLUS_IP101A 0x02430C54UL
 #define PHYID_REV_ID_MASK   0x0000000FUL
 
 #define PHYID_GET_PHY_ID(i)         ((i) & ~PHYID_REV_ID_MASK)
@@ -1434,8 +1434,10 @@ struct velocity_opt {
 #define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
 
 struct velocity_info {
+       struct device *dev;
        struct pci_dev *pdev;
-       struct net_device *dev;
+       struct net_device *netdev;
+       int no_eeprom;
 
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        u8 ip_addr[4];
@@ -1514,7 +1516,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr)
        int res = -ENOENT;
 
        rcu_read_lock();
-       in_dev = __in_dev_get_rcu(vptr->dev);
+       in_dev = __in_dev_get_rcu(vptr->netdev);
        if (in_dev != NULL) {
                ifa = (struct in_ifaddr *) in_dev->ifa_list;
                if (ifa != NULL) {
index a518dcab396e69cdbd96b8133bef82e237c5daf3..30fed08d1674377bf5b51d6461ef0b13b229cfff 100644 (file)
@@ -734,7 +734,6 @@ err_hw_probe:
        unregister_netdev(ndev);
 err_register:
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -750,7 +749,6 @@ static int w5100_remove(struct platform_device *pdev)
 
        unregister_netdev(ndev);
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 6e00e3f94ce4e4909d9b624b710f888039f68ded..e92884564e1e97c43c0a95769b8db950e244bfe5 100644 (file)
@@ -646,7 +646,6 @@ err_hw_probe:
        unregister_netdev(ndev);
 err_register:
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -662,7 +661,6 @@ static int w5300_remove(struct platform_device *pdev)
 
        unregister_netdev(ndev);
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 122d60c0481b0d8eab2a95b56bc495665390a0ec..7b90a5eba0995326ce6d808749c65aa6f77cf8e7 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_XILINX
        bool "Xilinx devices"
        default y
-       depends on PPC || PPC32 || MICROBLAZE
+       depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -20,7 +20,7 @@ if NET_VENDOR_XILINX
 
 config XILINX_EMACLITE
        tristate "Xilinx 10/100 Ethernet Lite support"
-       depends on (PPC32 || MICROBLAZE)
+       depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ)
        select PHYLIB
        ---help---
          This driver supports the 10/100 Ethernet Lite from Xilinx.
index 57c2e5ef2804ec5a89841557bb4e867c11cde5ec..58eb4488beff13201c411f5374f803cc730c7c8c 100644 (file)
@@ -1007,7 +1007,7 @@ static int temac_of_probe(struct platform_device *op)
                return -ENOMEM;
 
        ether_setup(ndev);
-       dev_set_drvdata(&op->dev, ndev);
+       platform_set_drvdata(op, ndev);
        SET_NETDEV_DEV(ndev, &op->dev);
        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
        ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
@@ -1136,7 +1136,7 @@ static int temac_of_probe(struct platform_device *op)
 
 static int temac_of_remove(struct platform_device *op)
 {
-       struct net_device *ndev = dev_get_drvdata(&op->dev);
+       struct net_device *ndev = platform_get_drvdata(op);
        struct temac_local *lp = netdev_priv(ndev);
 
        temac_mdio_teardown(lp);
@@ -1145,7 +1145,6 @@ static int temac_of_remove(struct platform_device *op)
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
-       dev_set_drvdata(&op->dev, NULL);
        iounmap(lp->regs);
        if (lp->sdma_regs)
                iounmap(lp->sdma_regs);
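
The temac and axienet hunks swap dev_set_drvdata(&op->dev, ...) for platform_set_drvdata(op, ...). The two are equivalent here; the platform helpers are thin wrappers over the generic driver-data accessors, roughly (paraphrased from <linux/platform_device.h>):

        static inline void *platform_get_drvdata(const struct platform_device *pdev)
        {
                return dev_get_drvdata(&pdev->dev);
        }

        static inline void platform_set_drvdata(struct platform_device *pdev,
                                                void *data)
        {
                dev_set_drvdata(&pdev->dev, data);
        }
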
index 24748e8367a1ad346a245bc890eb27bc8b659b0a..fb7d1c28a2ea345326c2b225915ca2676295a308 100644 (file)
@@ -1484,7 +1484,7 @@ static int axienet_of_probe(struct platform_device *op)
                return -ENOMEM;
 
        ether_setup(ndev);
-       dev_set_drvdata(&op->dev, ndev);
+       platform_set_drvdata(op, ndev);
 
        SET_NETDEV_DEV(ndev, &op->dev);
        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
@@ -1622,7 +1622,7 @@ nodev:
 
 static int axienet_of_remove(struct platform_device *op)
 {
-       struct net_device *ndev = dev_get_drvdata(&op->dev);
+       struct net_device *ndev = platform_get_drvdata(op);
        struct axienet_local *lp = netdev_priv(ndev);
 
        axienet_mdio_teardown(lp);
@@ -1632,8 +1632,6 @@ static int axienet_of_remove(struct platform_device *op)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
 
-       dev_set_drvdata(&op->dev, NULL);
-
        iounmap(lp->regs);
        if (lp->dma_regs)
                iounmap(lp->dma_regs);
index 919b983114e907242bc49ce64ba95eb00c3e2008..fd4dbdae5331a3cf31732d9691a1356b7d1abf1c 100644 (file)
@@ -2,9 +2,9 @@
  * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
  *
  * This is a new flat driver which is based on the original emac_lite
- * driver from John Williams <john.williams@petalogix.com>.
+ * driver from John Williams <john.williams@xilinx.com>.
  *
- * 2007-2009 (c) Xilinx, Inc.
+ * 2007 - 2013 (c) Xilinx, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -159,34 +159,32 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
        u32 reg_data;
 
        /* Enable the Tx interrupts for the first Buffer */
-       reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
-       out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
-                reg_data | XEL_TSR_XMIT_IE_MASK);
+       reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+       __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+                    drvdata->base_addr + XEL_TSR_OFFSET);
 
        /* Enable the Tx interrupts for the second Buffer if
         * configured in HW */
        if (drvdata->tx_ping_pong != 0) {
-               reg_data = in_be32(drvdata->base_addr +
+               reg_data = __raw_readl(drvdata->base_addr +
                                   XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
-               out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
-                        XEL_TSR_OFFSET,
-                        reg_data | XEL_TSR_XMIT_IE_MASK);
+               __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+                            drvdata->base_addr + XEL_BUFFER_OFFSET +
+                            XEL_TSR_OFFSET);
        }
 
        /* Enable the Rx interrupts for the first buffer */
-       out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
-                XEL_RSR_RECV_IE_MASK);
+       __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
        /* Enable the Rx interrupts for the second Buffer if
         * configured in HW */
        if (drvdata->rx_ping_pong != 0) {
-               out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
-                        XEL_RSR_OFFSET,
-                        XEL_RSR_RECV_IE_MASK);
+               __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
+                            XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
        }
 
        /* Enable the Global Interrupt Enable */
-       out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);
+       __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
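
Note that the argument order flips in this conversion: the powerpc-only out_be32() takes (address, value) while the portable __raw_writel() takes (value, address), and the __raw_* accessors are native-endian with no implied barriers. A minimal before/after sketch (the register names are illustrative):

        static void set_reg_bit(void __iomem *base, unsigned long offset, u32 bit)
        {
                u32 val;

                /* old (powerpc-only):  val = in_be32(base + offset);
                 *                      out_be32(base + offset, val | bit);        */
                val = __raw_readl(base + offset);        /* portable, native-endian */
                __raw_writel(val | bit, base + offset);  /* note: (value, address)  */
        }
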
 
 /**
@@ -201,37 +199,37 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
        u32 reg_data;
 
        /* Disable the Global Interrupt Enable */
-       out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);
+       __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
        /* Disable the Tx interrupts for the first buffer */
-       reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
-       out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
-                reg_data & (~XEL_TSR_XMIT_IE_MASK));
+       reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+       __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+                    drvdata->base_addr + XEL_TSR_OFFSET);
 
        /* Disable the Tx interrupts for the second Buffer
         * if configured in HW */
        if (drvdata->tx_ping_pong != 0) {
-               reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+               reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
                                   XEL_TSR_OFFSET);
-               out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
-                        XEL_TSR_OFFSET,
-                        reg_data & (~XEL_TSR_XMIT_IE_MASK));
+               __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+                            drvdata->base_addr + XEL_BUFFER_OFFSET +
+                            XEL_TSR_OFFSET);
        }
 
        /* Disable the Rx interrupts for the first buffer */
-       reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
-       out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
-                reg_data & (~XEL_RSR_RECV_IE_MASK));
+       reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+       __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+                    drvdata->base_addr + XEL_RSR_OFFSET);
 
        /* Disable the Rx interrupts for the second buffer
         * if configured in HW */
        if (drvdata->rx_ping_pong != 0) {
 
-               reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+               reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
                                   XEL_RSR_OFFSET);
-               out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
-                        XEL_RSR_OFFSET,
-                        reg_data & (~XEL_RSR_RECV_IE_MASK));
+               __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+                            drvdata->base_addr + XEL_BUFFER_OFFSET +
+                            XEL_RSR_OFFSET);
        }
 }
 
@@ -351,7 +349,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
                byte_count = ETH_FRAME_LEN;
 
        /* Check if the expected buffer is available */
-       reg_data = in_be32(addr + XEL_TSR_OFFSET);
+       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
        if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
             XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
 
@@ -364,7 +362,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 
                addr = (void __iomem __force *)((u32 __force)addr ^
                                                 XEL_BUFFER_OFFSET);
-               reg_data = in_be32(addr + XEL_TSR_OFFSET);
+               reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
 
                if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
                     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -375,15 +373,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
        /* Write the frame to the buffer */
        xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-       out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK));
+       __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+                    addr + XEL_TPLR_OFFSET);
 
        /* Update the Tx Status Register to indicate that there is a
         * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
         * is used by the interrupt handler to check whether a frame
         * has been transmitted */
-       reg_data = in_be32(addr + XEL_TSR_OFFSET);
+       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
        reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-       out_be32(addr + XEL_TSR_OFFSET, reg_data);
+       __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
 
        return 0;
 }
@@ -408,7 +407,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
        addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
        /* Verify which buffer has valid data */
-       reg_data = in_be32(addr + XEL_RSR_OFFSET);
+       reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
 
        if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
                if (drvdata->rx_ping_pong != 0)
@@ -425,14 +424,14 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
                        return 0;       /* No data was available */
 
                /* Verify that buffer has valid data */
-               reg_data = in_be32(addr + XEL_RSR_OFFSET);
+               reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
                if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
                     XEL_RSR_RECV_DONE_MASK)
                        return 0;       /* No data was available */
        }
 
        /* Get the protocol type of the ethernet frame that arrived */
-       proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
+       proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
                        XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
                        XEL_RPLR_LENGTH_MASK);
 
@@ -441,7 +440,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
        if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
 
                if (proto_type == ETH_P_IP) {
-                       length = ((ntohl(in_be32(addr +
+                       length = ((ntohl(__raw_readl(addr +
                                        XEL_HEADER_IP_LENGTH_OFFSET +
                                        XEL_RXBUFF_OFFSET)) >>
                                        XEL_HEADER_SHIFT) &
@@ -463,9 +462,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
                                data, length);
 
        /* Acknowledge the frame */
-       reg_data = in_be32(addr + XEL_RSR_OFFSET);
+       reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
        reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-       out_be32(addr + XEL_RSR_OFFSET, reg_data);
+       __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
 
        return length;
 }
@@ -492,14 +491,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
 
        xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-       out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN);
+       __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
        /* Update the MAC address in the EmacLite */
-       reg_data = in_be32(addr + XEL_TSR_OFFSET);
-       out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR);
+       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+       __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
        /* Wait for EmacLite to finish with the MAC address update */
-       while ((in_be32(addr + XEL_TSR_OFFSET) &
+       while ((__raw_readl(addr + XEL_TSR_OFFSET) &
                XEL_TSR_PROG_MAC_ADDR) != 0)
                ;
 }
@@ -669,31 +668,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
        u32 tx_status;
 
        /* Check if there is Rx Data available */
-       if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) ||
-                       (in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+       if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+                        XEL_RSR_RECV_DONE_MASK) ||
+           (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
                         & XEL_RSR_RECV_DONE_MASK))
 
                xemaclite_rx_handler(dev);
 
        /* Check if the Transmission for the first buffer is completed */
-       tx_status = in_be32(base_addr + XEL_TSR_OFFSET);
+       tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
        if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
                (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-               out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
+               __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
                tx_complete = true;
        }
 
        /* Check if the Transmission for the second buffer is completed */
-       tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+       tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
        if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
                (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-               out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
-                        tx_status);
+               __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+                            XEL_TSR_OFFSET);
 
                tx_complete = true;
        }
@@ -726,7 +726,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
        /* wait for the MDIO interface to not be busy or timeout
           after some time.
        */
-       while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+       while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
                        XEL_MDIOCTRL_MDIOSTS_MASK) {
                if (end - jiffies <= 0) {
                        WARN_ON(1);
@@ -762,17 +762,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
         * MDIO Address register. Set the Status bit in the MDIO Control
         * register to start a MDIO read transaction.
         */
-       ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-       out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
-                XEL_MDIOADDR_OP_MASK |
-                ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
-       out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
-                ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+       ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       __raw_writel(XEL_MDIOADDR_OP_MASK |
+                    ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+                    lp->base_addr + XEL_MDIOADDR_OFFSET);
+       __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        if (xemaclite_mdio_wait(lp))
                return -ETIMEDOUT;
 
-       rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
+       rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
        dev_dbg(&lp->ndev->dev,
                "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -809,13 +809,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
         * Data register. Finally, set the Status bit in the MDIO Control
         * register to start a MDIO write transaction.
         */
-       ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-       out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
-                ~XEL_MDIOADDR_OP_MASK &
-                ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
-       out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
-       out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
-                ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+       ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       __raw_writel(~XEL_MDIOADDR_OP_MASK &
+                    ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+                    lp->base_addr + XEL_MDIOADDR_OFFSET);
+       __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+       __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        return 0;
 }
@@ -848,24 +848,39 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
        int rc;
        struct resource res;
        struct device_node *np = of_get_parent(lp->phy_node);
+       struct device_node *npp;
 
        /* Don't register the MDIO bus if the phy_node or its parent node
         * can't be found.
         */
-       if (!np)
+       if (!np) {
+               dev_err(dev, "Failed to register mdio bus.\n");
                return -ENODEV;
+       }
+       npp = of_get_parent(np);
+
+       of_address_to_resource(npp, 0, &res);
+       if (lp->ndev->mem_start != res.start) {
+               struct phy_device *phydev;
+               phydev = of_phy_find_device(lp->phy_node);
+               if (!phydev)
+                       dev_info(dev,
+                                "MDIO of the phy is not registered yet\n");
+               return 0;
+       }
 
        /* Enable the MDIO bus by asserting the enable bit in MDIO Control
         * register.
         */
-       out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
-                XEL_MDIOCTRL_MDIOEN_MASK);
+       __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        bus = mdiobus_alloc();
-       if (!bus)
+       if (!bus) {
+               dev_err(dev, "Failed to allocate mdiobus\n");
                return -ENOMEM;
+       }
 
-       of_address_to_resource(np, 0, &res);
        snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
                 (unsigned long long)res.start);
        bus->priv = lp;
@@ -879,8 +894,10 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
        lp->mii_bus = bus;
 
        rc = of_mdiobus_register(bus, np);
-       if (rc)
+       if (rc) {
+               dev_err(dev, "Failed to register mdio bus.\n");
                goto err_register;
+       }
 
        return 0;
 
@@ -896,7 +913,7 @@ err_register:
  * There's nothing in the Emaclite device to be configured when the link
  * state changes. We just print the status.
  */
-void xemaclite_adjust_link(struct net_device *ndev)
+static void xemaclite_adjust_link(struct net_device *ndev)
 {
        struct net_local *lp = netdev_priv(ndev);
        struct phy_device *phy = lp->phy_dev;
@@ -946,7 +963,8 @@ static int xemaclite_open(struct net_device *dev)
                phy_write(lp->phy_dev, MII_CTRL1000, 0);
 
                /* Advertise only 10 and 100mbps full/half duplex speeds */
-               phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+               phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
+                         ADVERTISE_CSMA);
 
                /* Restart auto negotiation */
                bmcr = phy_read(lp->phy_dev, MII_BMCR);
@@ -1057,13 +1075,14 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
  * This function un maps the IO region of the Emaclite device and frees the net
  * device.
  */
-static void xemaclite_remove_ndev(struct net_device *ndev)
+static void xemaclite_remove_ndev(struct net_device *ndev,
+                                 struct platform_device *pdev)
 {
        if (ndev) {
                struct net_local *lp = netdev_priv(ndev);
 
                if (lp->base_addr)
-                       iounmap((void __iomem __force *) (lp->base_addr));
+                       devm_iounmap(&pdev->dev, lp->base_addr);
                free_netdev(ndev);
        }
 }
@@ -1109,8 +1128,7 @@ static struct net_device_ops xemaclite_netdev_ops;
  */
 static int xemaclite_of_probe(struct platform_device *ofdev)
 {
-       struct resource r_irq; /* Interrupt resources */
-       struct resource r_mem; /* IO mem resources */
+       struct resource *res;
        struct net_device *ndev = NULL;
        struct net_local *lp = NULL;
        struct device *dev = &ofdev->dev;
@@ -1120,20 +1138,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
        dev_info(dev, "Device Tree Probing\n");
 
-       /* Get iospace for the device */
-       rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
-       if (rc) {
-               dev_err(dev, "invalid address\n");
-               return rc;
-       }
-
-       /* Get IRQ for the device */
-       rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
-       if (!rc) {
-               dev_err(dev, "no IRQ found\n");
-               return rc;
-       }
-
        /* Create an ethernet device instance */
        ndev = alloc_etherdev(sizeof(struct net_local));
        if (!ndev)
@@ -1142,30 +1146,28 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        dev_set_drvdata(dev, ndev);
        SET_NETDEV_DEV(ndev, &ofdev->dev);
 
-       ndev->irq = r_irq.start;
-       ndev->mem_start = r_mem.start;
-       ndev->mem_end = r_mem.end;
-
        lp = netdev_priv(ndev);
        lp->ndev = ndev;
 
-       if (!request_mem_region(ndev->mem_start,
-                               ndev->mem_end - ndev->mem_start + 1,
-                               DRIVER_NAME)) {
-               dev_err(dev, "Couldn't lock memory region at %p\n",
-                       (void *)ndev->mem_start);
-               rc = -EBUSY;
-               goto error2;
+       /* Get IRQ for the device */
+       res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "no IRQ found\n");
+               goto error;
        }
 
-       /* Get the virtual base address for the device */
-       lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
-       if (NULL == lp->base_addr) {
-               dev_err(dev, "EmacLite: Could not allocate iomem\n");
-               rc = -EIO;
-               goto error1;
+       ndev->irq = res->start;
+
+       res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+       lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
+       if (IS_ERR(lp->base_addr)) {
+               rc = PTR_ERR(lp->base_addr);
+               goto error;
        }
 
+       ndev->mem_start = res->start;
+       ndev->mem_end = res->end;
+
        spin_lock_init(&lp->reset_lock);
        lp->next_tx_buf_to_use = 0x0;
        lp->next_rx_buf_to_use = 0x0;
@@ -1180,8 +1182,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
                dev_warn(dev, "No MAC address found\n");
 
        /* Clear the Tx CSR's in case this is a restart */
-       out_be32(lp->base_addr + XEL_TSR_OFFSET, 0);
-       out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
+       __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+       __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
        /* Set the MAC address in the EmacLite device */
        xemaclite_update_address(lp, ndev->dev_addr);
@@ -1202,7 +1204,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        if (rc) {
                dev_err(dev,
                        "Cannot register network device, aborting\n");
-               goto error1;
+               goto error;
        }
 
        dev_info(dev,
@@ -1211,11 +1213,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
                 (unsigned int __force)lp->base_addr, ndev->irq);
        return 0;
 
-error1:
-       release_mem_region(ndev->mem_start, resource_size(&r_mem));
-
-error2:
-       xemaclite_remove_ndev(ndev);
+error:
+       xemaclite_remove_ndev(ndev, ofdev);
        return rc;
 }
 
@@ -1250,9 +1249,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
 
-       release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
-
-       xemaclite_remove_ndev(ndev);
+       xemaclite_remove_ndev(ndev, of_dev);
        dev_set_drvdata(dev, NULL);
 
        return 0;
index 6958a5e87703bb3625b917bcdecfe8a7ef50e7e9..3d689fcb7917bd62e193fc1c8414d2a9075e0be5 100644 (file)
@@ -1472,7 +1472,6 @@ err_phy_dis:
        phy_disconnect(port->phydev);
 err_free_mem:
        npe_port_tab[NPE_ID(port->id)] = NULL;
-       platform_set_drvdata(pdev, NULL);
        release_resource(port->mem_res);
 err_npe_rel:
        npe_release(port->npe);
@@ -1489,7 +1488,6 @@ static int eth_remove_one(struct platform_device *pdev)
        unregister_netdev(dev);
        phy_disconnect(port->phydev);
        npe_port_tab[NPE_ID(port->id)] = NULL;
-       platform_set_drvdata(pdev, NULL);
        npe_release(port->npe);
        release_resource(port->mem_res);
        free_netdev(dev);
index d5bd563ac131e59362b7df5c33b25f5266f9370f..f5d7305a5784174f9868f045d186e9c90d2bbc20 100644 (file)
@@ -2246,15 +2246,4 @@ static struct pci_driver skfddi_pci_driver = {
        .remove         = skfp_remove_one,
 };
 
-static int __init skfd_init(void)
-{
-       return pci_register_driver(&skfddi_pci_driver);
-}
-
-static void __exit skfd_exit(void)
-{
-       pci_unregister_driver(&skfddi_pci_driver);
-}
-
-module_init(skfd_init);
-module_exit(skfd_exit);
+module_pci_driver(skfddi_pci_driver);
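
module_pci_driver() is the standard boilerplate-removal macro; the init/exit functions deleted above are exactly what it generates. Roughly, the expansion is:

        /* module_pci_driver(skfddi_pci_driver) expands to approximately: */
        static int __init skfddi_pci_driver_init(void)
        {
                return pci_register_driver(&skfddi_pci_driver);
        }
        module_init(skfddi_pci_driver_init);

        static void __exit skfddi_pci_driver_exit(void)
        {
                pci_unregister_driver(&skfddi_pci_driver);
        }
        module_exit(skfddi_pci_driver_exit);
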
index 02de6c891670b87b9f19ef9a418b27ce39fa745c..f91bf0ddf031a077b89d961ca5cb16c87f8321ac 100644 (file)
@@ -103,7 +103,7 @@ static struct packet_type bpq_packet_type __read_mostly = {
 };
 
 static struct notifier_block bpq_dev_notifier = {
-       .notifier_call =bpq_device_event,
+       .notifier_call = bpq_device_event,
 };
 
 
@@ -544,9 +544,10 @@ static void bpq_free_device(struct net_device *ndev)
 /*
  *     Handle device status changes.
  */
-static int bpq_device_event(struct notifier_block *this,unsigned long event, void *ptr)
+static int bpq_device_event(struct notifier_block *this,
+                           unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
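
Several drivers in this merge stop casting the notifier's opaque pointer straight to a net_device and instead go through netdev_notifier_info_to_dev(), which extracts the device from the struct netdev_notifier_info that the core now passes. A minimal sketch of the updated callback shape (the handler body is illustrative):

        static int example_device_event(struct notifier_block *nb,
                                        unsigned long event, void *ptr)
        {
                /* 'ptr' is now a struct netdev_notifier_info *, not the device */
                struct net_device *dev = netdev_notifier_info_to_dev(ptr);

                if (event == NETDEV_UNREGISTER)
                        pr_debug("%s is going away\n", dev->name);

                return NOTIFY_DONE;
        }
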
index 3c4d6274bb9b25527aa2af7e1daf4f07fe3a84d0..00ed75155ce8d4b7faff9ca9f3a98c28d355ff4c 100644 (file)
@@ -1686,15 +1686,4 @@ static struct pci_driver rr_driver = {
        .remove         = rr_remove_one,
 };
 
-static int __init rr_init_module(void)
-{
-       return pci_register_driver(&rr_driver);
-}
-
-static void __exit rr_cleanup_module(void)
-{
-       pci_unregister_driver(&rr_driver);
-}
-
-module_init(rr_init_module);
-module_exit(rr_cleanup_module);
+module_pci_driver(rr_driver);
index 088c554961918beeec45dd9c6d22d2508409e92c..4dccead586bee57a076be015478809ea82e7157e 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <net/arp.h>
@@ -284,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 
        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;
-       skb->vlan_tci = packet->vlan_tci;
+       if (packet->vlan_tci & VLAN_TAG_PRESENT)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      packet->vlan_tci);
 
        net->stats.rx_packets++;
        net->stats.rx_bytes += packet->total_data_buflen;
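
Rather than copying the raw tag into skb->vlan_tci unconditionally, the netvsc receive path now records a tag only when VLAN_TAG_PRESENT is set, and does so through the accessor that also stores the protocol. The same pattern in isolation (helpers are from <linux/if_vlan.h>; the wrapper name is illustrative):

        static void record_rx_vlan(struct sk_buff *skb, u16 tci)
        {
                /* Only tag the skb if the lower layer flagged a valid VLAN tag */
                if (tci & VLAN_TAG_PRESENT)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
        }
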
index 22b4527321b1cf6a99140f8845ba40137f8cf451..c74f384c87d50a0314b84e2808ab735c00ee8684 100644 (file)
@@ -794,7 +794,6 @@ static int bfin_sir_remove(struct platform_device *pdev)
        kfree(self->rx_buff.head);
        free_netdev(dev);
        kfree(sir_port);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 9448587de4531c79517f05d0928b36163d474325..4455425f1c777865676150fa5da63b94f0ffc736 100644 (file)
@@ -838,7 +838,6 @@ static int sh_irda_remove(struct platform_device *pdev)
        sh_irda_remove_iobuf(self);
        iounmap(self->membase);
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 24aefcd840654aa10ca97cee1f7b323d9d7a404e..89682b49900ff2cac834c85cc58429ec4fb65ec3 100644 (file)
@@ -796,7 +796,6 @@ static int sh_sir_remove(struct platform_device *pdev)
        sh_sir_remove_iobuf(self);
        iounmap(self->membase);
        free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index d5a141c7c4e786b06746c73668407c110e74611e..18373b6ae37d78543cf925e2d86efab1578f3019 100644 (file)
@@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
        }
 
        if (port->passthru)
-               vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+               vlan = list_first_or_null_rcu(&port->vlans,
+                                             struct macvlan_dev, list);
        else
                vlan = macvlan_hash_lookup(port, eth->h_dest);
        if (vlan == NULL)
@@ -637,6 +638,14 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
        return __ethtool_get_settings(vlan->lowerdev, cmd);
 }
 
+static netdev_features_t macvlan_fix_features(struct net_device *dev,
+                                             netdev_features_t features)
+{
+       struct macvlan_dev *vlan = netdev_priv(dev);
+
+       return features & (vlan->set_features | ~MACVLAN_FEATURES);
+}
+
 static const struct ethtool_ops macvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_settings           = macvlan_ethtool_get_settings,
@@ -650,6 +659,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_stop               = macvlan_stop,
        .ndo_start_xmit         = macvlan_start_xmit,
        .ndo_change_mtu         = macvlan_change_mtu,
+       .ndo_fix_features       = macvlan_fix_features,
        .ndo_change_rx_flags    = macvlan_change_rx_flags,
        .ndo_set_mac_address    = macvlan_set_mac_address,
        .ndo_set_rx_mode        = macvlan_set_mac_lists,
@@ -790,6 +800,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->port     = port;
        vlan->receive  = receive;
        vlan->forward  = forward;
+       vlan->set_features = MACVLAN_FEATURES;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
@@ -814,7 +825,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                goto upper_dev_unlink;
 
-       list_add_tail(&vlan->list, &port->vlans);
+       list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);
 
        return 0;
@@ -842,7 +853,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
-       list_del(&vlan->list);
+       list_del_rcu(&vlan->list);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(vlan->lowerdev, dev);
 }
@@ -852,18 +863,24 @@ static int macvlan_changelink(struct net_device *dev,
                struct nlattr *tb[], struct nlattr *data[])
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
-       if (data && data[IFLA_MACVLAN_MODE])
-               vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+
        if (data && data[IFLA_MACVLAN_FLAGS]) {
                __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
                bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
-
-               if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
-                       dev_set_promiscuity(vlan->lowerdev, -1);
-               else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
-                       dev_set_promiscuity(vlan->lowerdev, 1);
+               if (vlan->port->passthru && promisc) {
+                       int err;
+
+                       if (flags & MACVLAN_FLAG_NOPROMISC)
+                               err = dev_set_promiscuity(vlan->lowerdev, -1);
+                       else
+                               err = dev_set_promiscuity(vlan->lowerdev, 1);
+                       if (err < 0)
+                               return err;
+               }
                vlan->flags = flags;
        }
+       if (data && data[IFLA_MACVLAN_MODE])
+               vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
        return 0;
 }
 
@@ -920,7 +937,7 @@ static struct rtnl_link_ops macvlan_link_ops = {
 static int macvlan_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct macvlan_dev *vlan, *next;
        struct macvlan_port *port;
        LIST_HEAD(list_kill);
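
The macvlan hunks above convert the per-port vlan list to RCU (list_add_tail_rcu()/list_del_rcu() on the writer side, list_first_or_null_rcu() in the passthru path of macvlan_handle_frame()) and add a ndo_fix_features hook so that offload bits outside vlan->set_features are masked off. A minimal sketch of the same reader/writer pairing, using hypothetical names (my_port, my_dev) rather than anything from the patch:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_dev {
        struct list_head list;
};

struct my_port {
        struct list_head devs;  /* initialized with INIT_LIST_HEAD() */
};

/* writer side: add/remove under the configuration lock (RTNL here) */
static void my_port_add(struct my_port *port, struct my_dev *d)
{
        list_add_tail_rcu(&d->list, &port->devs);
}

static void my_port_del(struct my_dev *d)
{
        list_del_rcu(&d->list);
        /* free the entry only after a grace period (kfree_rcu() etc.) */
}

/* reader side: same lookup shape as the passthru case above */
static bool my_port_has_dev(struct my_port *port)
{
        struct my_dev *d;
        bool ret;

        rcu_read_lock();
        d = list_first_or_null_rcu(&port->devs, struct my_dev, list);
        ret = d != NULL;
        rcu_read_unlock();
        return ret;
}

The list_first_or_null_rcu() form matters in passthru mode because the list can legitimately be empty while a packet is in flight; list_first_entry() on an empty list would hand back a bogus pointer.
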
index 59e9605de316809a702951a5c6de3dbee3a43a69..5bfaecdd23548a2c932a70b1900d45dbf56392a8 100644 (file)
  * macvtap_proto is used to allocate queues through the sock allocation
  * mechanism.
  *
- * TODO: multiqueue support is currently not implemented, even though
- * macvtap is basically prepared for that. We will need to add this
- * here as well as in virtio-net and qemu to get line rate on 10gbit
- * adapters from a guest.
  */
 struct macvtap_queue {
        struct sock sk;
@@ -44,6 +40,9 @@ struct macvtap_queue {
        struct macvlan_dev __rcu *vlan;
        struct file *file;
        unsigned int flags;
+       u16 queue_index;
+       bool enabled;
+       struct list_head next;
 };
 
 static struct proto macvtap_proto = {
@@ -66,11 +65,14 @@ static struct cdev macvtap_cdev;
 
 static const struct proto_ops macvtap_socket_ops;
 
+#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+                     NETIF_F_TSO6 | NETIF_F_UFO)
+#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 /*
  * RCU usage:
  * The macvtap_queue and the macvlan_dev are loosely coupled, the
  * pointers from one to the other can only be read while rcu_read_lock
- * or macvtap_lock is held.
+ * or rtnl is held.
  *
  * Both the file and the macvlan_dev hold a reference on the macvtap_queue
  * through sock_hold(&q->sk). When the macvlan_dev goes away first,
@@ -82,54 +84,84 @@ static const struct proto_ops macvtap_socket_ops;
  * file or the dev. The data structure is freed through __sk_free
  * when both our references and any pending SKBs are gone.
  */
-static DEFINE_SPINLOCK(macvtap_lock);
 
-/*
- * get_slot: return a [unused/occupied] slot in vlan->taps[]:
- *     - if 'q' is NULL, return the first empty slot;
- *     - otherwise, return the slot this pointer occupies.
- */
-static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
+static int macvtap_enable_queue(struct net_device *dev, struct file *file,
+                               struct macvtap_queue *q)
 {
-       int i;
+       struct macvlan_dev *vlan = netdev_priv(dev);
+       int err = -EINVAL;
 
-       for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
-               if (rcu_dereference_protected(vlan->taps[i],
-                                             lockdep_is_held(&macvtap_lock)) == q)
-                       return i;
-       }
+       ASSERT_RTNL();
+
+       if (q->enabled)
+               goto out;
 
-       /* Should never happen */
-       BUG_ON(1);
+       err = 0;
+       rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
+       q->queue_index = vlan->numvtaps;
+       q->enabled = true;
+
+       vlan->numvtaps++;
+out:
+       return err;
 }
 
 static int macvtap_set_queue(struct net_device *dev, struct file *file,
-                               struct macvtap_queue *q)
+                            struct macvtap_queue *q)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
-       int index;
        int err = -EBUSY;
 
-       spin_lock(&macvtap_lock);
-       if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
+       rtnl_lock();
+       if (vlan->numqueues == MAX_MACVTAP_QUEUES)
                goto out;
 
        err = 0;
-       index = get_slot(vlan, NULL);
        rcu_assign_pointer(q->vlan, vlan);
-       rcu_assign_pointer(vlan->taps[index], q);
+       rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        sock_hold(&q->sk);
 
        q->file = file;
+       q->queue_index = vlan->numvtaps;
+       q->enabled = true;
        file->private_data = q;
+       list_add_tail(&q->next, &vlan->queue_list);
 
        vlan->numvtaps++;
+       vlan->numqueues++;
 
 out:
-       spin_unlock(&macvtap_lock);
+       rtnl_unlock();
        return err;
 }
 
+static int macvtap_disable_queue(struct macvtap_queue *q)
+{
+       struct macvlan_dev *vlan;
+       struct macvtap_queue *nq;
+
+       ASSERT_RTNL();
+       if (!q->enabled)
+               return -EINVAL;
+
+       vlan = rtnl_dereference(q->vlan);
+
+       if (vlan) {
+               int index = q->queue_index;
+               BUG_ON(index >= vlan->numvtaps);
+               nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
+               nq->queue_index = index;
+
+               rcu_assign_pointer(vlan->taps[index], nq);
+               RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
+               q->enabled = false;
+
+               vlan->numvtaps--;
+       }
+
+       return 0;
+}
+
 /*
  * The file owning the queue got closed, give up both
  * the reference that the files holds as well as the
@@ -142,19 +174,20 @@ static void macvtap_put_queue(struct macvtap_queue *q)
 {
        struct macvlan_dev *vlan;
 
-       spin_lock(&macvtap_lock);
-       vlan = rcu_dereference_protected(q->vlan,
-                                        lockdep_is_held(&macvtap_lock));
+       rtnl_lock();
+       vlan = rtnl_dereference(q->vlan);
+
        if (vlan) {
-               int index = get_slot(vlan, q);
+               if (q->enabled)
+                       BUG_ON(macvtap_disable_queue(q));
 
-               RCU_INIT_POINTER(vlan->taps[index], NULL);
+               vlan->numqueues--;
                RCU_INIT_POINTER(q->vlan, NULL);
                sock_put(&q->sk);
-               --vlan->numvtaps;
+               list_del_init(&q->next);
        }
 
-       spin_unlock(&macvtap_lock);
+       rtnl_unlock();
 
        synchronize_rcu();
        sock_put(&q->sk);
@@ -172,7 +205,12 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *tap = NULL;
-       int numvtaps = vlan->numvtaps;
+       /* Access to taps array is protected by rcu, but access to numvtaps
+        * isn't. Below we use it to lookup a queue, but treat it as a hint
+        * and validate that the result isn't NULL - in case we are
+        * racing against queue removal.
+        */
+       int numvtaps = ACCESS_ONCE(vlan->numvtaps);
        __u32 rxq;
 
        if (!numvtaps)
@@ -182,8 +220,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
        rxq = skb_get_rxhash(skb);
        if (rxq) {
                tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
-               if (tap)
-                       goto out;
+               goto out;
        }
 
        if (likely(skb_rx_queue_recorded(skb))) {
@@ -193,17 +230,10 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                        rxq -= numvtaps;
 
                tap = rcu_dereference(vlan->taps[rxq]);
-               if (tap)
-                       goto out;
-       }
-
-       /* Everything failed - find first available queue */
-       for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
-               tap = rcu_dereference(vlan->taps[rxq]);
-               if (tap)
-                       break;
+               goto out;
        }
 
+       tap = rcu_dereference(vlan->taps[0]);
 out:
        return tap;
 }
@@ -216,27 +246,24 @@ out:
 static void macvtap_del_queues(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
-       struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
+       struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
        int i, j = 0;
 
-       /* macvtap_put_queue can free some slots, so go through all slots */
-       spin_lock(&macvtap_lock);
-       for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
-               q = rcu_dereference_protected(vlan->taps[i],
-                                             lockdep_is_held(&macvtap_lock));
-               if (q) {
-                       qlist[j++] = q;
-                       RCU_INIT_POINTER(vlan->taps[i], NULL);
-                       RCU_INIT_POINTER(q->vlan, NULL);
+       ASSERT_RTNL();
+       list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
+               list_del_init(&q->next);
+               qlist[j++] = q;
+               RCU_INIT_POINTER(q->vlan, NULL);
+               if (q->enabled)
                        vlan->numvtaps--;
-               }
+               vlan->numqueues--;
        }
-       BUG_ON(vlan->numvtaps != 0);
+       for (i = 0; i < vlan->numvtaps; i++)
+               RCU_INIT_POINTER(vlan->taps[i], NULL);
+       BUG_ON(vlan->numvtaps);
+       BUG_ON(vlan->numqueues);
        /* guarantee that any future macvtap_set_queue will fail */
        vlan->numvtaps = MAX_MACVTAP_QUEUES;
-       spin_unlock(&macvtap_lock);
-
-       synchronize_rcu();
 
        for (--j; j >= 0; j--)
                sock_put(&qlist[j]->sk);
@@ -249,14 +276,44 @@ static void macvtap_del_queues(struct net_device *dev)
  */
 static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
+       struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+       netdev_features_t features;
        if (!q)
                goto drop;
 
        if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
                goto drop;
 
-       skb_queue_tail(&q->sk.sk_receive_queue, skb);
+       skb->dev = dev;
+       /* Apply the forward feature mask so that we perform segmentation
+        * according to the user's wishes.
+        */
+       features = netif_skb_features(skb) & vlan->tap_features;
+       if (netif_needs_gso(skb, features)) {
+               struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+
+               if (IS_ERR(segs))
+                       goto drop;
+
+               if (!segs) {
+                       skb_queue_tail(&q->sk.sk_receive_queue, skb);
+                       goto wake_up;
+               }
+
+               kfree_skb(skb);
+               while (segs) {
+                       struct sk_buff *nskb = segs->next;
+
+                       segs->next = NULL;
+                       skb_queue_tail(&q->sk.sk_receive_queue, segs);
+                       segs = nskb;
+               }
+       } else {
+               skb_queue_tail(&q->sk.sk_receive_queue, skb);
+       }
+
+wake_up:
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
        return NET_RX_SUCCESS;
 
@@ -322,6 +379,14 @@ static int macvtap_newlink(struct net *src_net,
                           struct nlattr *tb[],
                           struct nlattr *data[])
 {
+       struct macvlan_dev *vlan = netdev_priv(dev);
+       INIT_LIST_HEAD(&vlan->queue_list);
+
+       /* Since macvlan supports all offloads by default, make
+        * tap support all offloads also.
+        */
+       vlan->tap_features = TUN_OFFLOADS;
+
        /* Don't put anything that may fail after macvlan_common_newlink
         * because we can't undo what it does.
         */
@@ -385,7 +450,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
        if (!q)
                goto out;
 
-       q->sock.wq = &q->wq;
+       RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
        q->sock.type = SOCK_RAW;
        q->sock.state = SS_CONNECTED;
@@ -727,8 +792,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 
        skb_probe_transport_header(skb, ETH_HLEN);
 
-       rcu_read_lock_bh();
-       vlan = rcu_dereference_bh(q->vlan);
+       rcu_read_lock();
+       vlan = rcu_dereference(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
@@ -739,7 +804,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                macvlan_start_xmit(skb, vlan->dev);
        else
                kfree_skb(skb);
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return total_len;
 
@@ -747,11 +812,11 @@ err_kfree:
        kfree_skb(skb);
 
 err:
-       rcu_read_lock_bh();
-       vlan = rcu_dereference_bh(q->vlan);
+       rcu_read_lock();
+       vlan = rcu_dereference(q->vlan);
        if (vlan)
                vlan->dev->stats.tx_dropped++;
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return err;
 }
@@ -827,11 +892,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
        copied += len;
 
 done:
-       rcu_read_lock_bh();
-       vlan = rcu_dereference_bh(q->vlan);
+       rcu_read_lock();
+       vlan = rcu_dereference(q->vlan);
        if (vlan)
                macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return ret ? ret : copied;
 }
@@ -845,7 +910,9 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
        ssize_t ret = 0;
 
        while (len) {
-               prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE);
+               if (!noblock)
+                       prepare_to_wait(sk_sleep(&q->sk), &wait,
+                                       TASK_INTERRUPTIBLE);
 
                /* Read frames from the queue */
                skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -867,7 +934,8 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
                break;
        }
 
-       finish_wait(sk_sleep(&q->sk), &wait);
+       if (!noblock)
+               finish_wait(sk_sleep(&q->sk), &wait);
        return ret;
 }
 
@@ -890,6 +958,96 @@ out:
        return ret;
 }
 
+static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
+{
+       struct macvlan_dev *vlan;
+
+       ASSERT_RTNL();
+       vlan = rtnl_dereference(q->vlan);
+       if (vlan)
+               dev_hold(vlan->dev);
+
+       return vlan;
+}
+
+static void macvtap_put_vlan(struct macvlan_dev *vlan)
+{
+       dev_put(vlan->dev);
+}
+
+static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
+{
+       struct macvtap_queue *q = file->private_data;
+       struct macvlan_dev *vlan;
+       int ret;
+
+       vlan = macvtap_get_vlan(q);
+       if (!vlan)
+               return -EINVAL;
+
+       if (flags & IFF_ATTACH_QUEUE)
+               ret = macvtap_enable_queue(vlan->dev, file, q);
+       else if (flags & IFF_DETACH_QUEUE)
+               ret = macvtap_disable_queue(q);
+       else
+               ret = -EINVAL;
+
+       macvtap_put_vlan(vlan);
+       return ret;
+}
+
+static int set_offload(struct macvtap_queue *q, unsigned long arg)
+{
+       struct macvlan_dev *vlan;
+       netdev_features_t features;
+       netdev_features_t feature_mask = 0;
+
+       vlan = rtnl_dereference(q->vlan);
+       if (!vlan)
+               return -ENOLINK;
+
+       features = vlan->dev->features;
+
+       if (arg & TUN_F_CSUM) {
+               feature_mask = NETIF_F_HW_CSUM;
+
+               if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
+                       if (arg & TUN_F_TSO_ECN)
+                               feature_mask |= NETIF_F_TSO_ECN;
+                       if (arg & TUN_F_TSO4)
+                               feature_mask |= NETIF_F_TSO;
+                       if (arg & TUN_F_TSO6)
+                               feature_mask |= NETIF_F_TSO6;
+               }
+
+               if (arg & TUN_F_UFO)
+                       feature_mask |= NETIF_F_UFO;
+       }
+
+       /* tun/tap driver inverts the usage for TSO offloads, where
+        * setting the TSO bit means that the userspace wants to
+        * accept TSO frames and turning it off means that user space
+        * does not support TSO.
+        * For macvtap, we have to invert it to mean the same thing.
+        * When user space turns off TSO, we turn off GSO/LRO so that
+        * user-space will not receive TSO frames.
+        */
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+               features |= RX_OFFLOADS;
+       else
+               features &= ~RX_OFFLOADS;
+
+       /* tap_features are the same as features on tun/tap and
+        * reflect user expectations.
+        */
+       vlan->tap_features = vlan->dev->features &
+                           (feature_mask | ~TUN_OFFLOADS);
+       vlan->set_features = features;
+       netdev_update_features(vlan->dev);
+
+       return 0;
+}
+
 /*
  * provide compatibility with generic tun/tap interface
  */
@@ -913,7 +1071,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                        return -EFAULT;
 
                ret = 0;
-               if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
+               if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
+                   (IFF_NO_PI | IFF_TAP))
                        ret = -EINVAL;
                else
                        q->flags = u;
@@ -921,24 +1080,31 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                return ret;
 
        case TUNGETIFF:
-               rcu_read_lock_bh();
-               vlan = rcu_dereference_bh(q->vlan);
-               if (vlan)
-                       dev_hold(vlan->dev);
-               rcu_read_unlock_bh();
-
-               if (!vlan)
+               rtnl_lock();
+               vlan = macvtap_get_vlan(q);
+               if (!vlan) {
+                       rtnl_unlock();
                        return -ENOLINK;
+               }
 
                ret = 0;
                if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
-               dev_put(vlan->dev);
+               macvtap_put_vlan(vlan);
+               rtnl_unlock();
                return ret;
 
+       case TUNSETQUEUE:
+               if (get_user(u, &ifr->ifr_flags))
+                       return -EFAULT;
+               rtnl_lock();
+               ret = macvtap_ioctl_set_queue(file, u);
+               rtnl_unlock();
+               return ret;
+
        case TUNGETFEATURES:
-               if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
+               if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
+                            IFF_MULTI_QUEUE, up))
                        return -EFAULT;
                return 0;
 
@@ -974,7 +1140,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                         got enabled for forwarded frames */
                if (!(q->flags & IFF_VNET_HDR))
                        return  -EINVAL;
-               return 0;
+               rtnl_lock();
+               ret = set_offload(q, arg);
+               rtnl_unlock();
+               return ret;
 
        default:
                return -EINVAL;
@@ -1053,7 +1222,7 @@ EXPORT_SYMBOL_GPL(macvtap_get_socket);
 static int macvtap_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct macvlan_dev *vlan;
        struct device *classdev;
        dev_t devt;
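
Taken together, the macvtap changes above drop the private spinlock in favour of RTNL, keep every queue on vlan->queue_list with an enabled/disabled state, and wire queue attach/detach up to the TUNSETQUEUE ioctl, mirroring the multiqueue tun interface. A hedged userspace sketch of the new interface follows; the device node path is a placeholder (macvtap character devices are per-interface) and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
        const char *path = "/dev/tap4";         /* placeholder node */
        struct ifreq ifr;
        int q0, q1;

        /* each open() of the macvtap node creates one queue */
        q0 = open(path, O_RDWR);
        q1 = open(path, O_RDWR);
        if (q0 < 0 || q1 < 0) {
                perror("open");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | IFF_MULTI_QUEUE;
        if (ioctl(q0, TUNSETIFF, &ifr) < 0 || ioctl(q1, TUNSETIFF, &ifr) < 0) {
                perror("TUNSETIFF");
                return 1;
        }

        /* park the second queue without closing its descriptor ... */
        ifr.ifr_flags = IFF_DETACH_QUEUE;
        if (ioctl(q1, TUNSETQUEUE, &ifr) < 0)
                perror("TUNSETQUEUE detach");

        /* ... and re-enable it later */
        ifr.ifr_flags = IFF_ATTACH_QUEUE;
        if (ioctl(q1, TUNSETQUEUE, &ifr) < 0)
                perror("TUNSETQUEUE attach");

        close(q1);
        close(q0);
        return 0;
}

A detached queue keeps its socket and file reference; macvtap_get_queue() simply stops hashing packets to it because the queue is swapped out of vlan->taps[] and numvtaps is decremented.
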
index 59ac143dec2576a697d9f383ef4d5d7372eef93c..1d1d0a12765ce943b1bb309bdcbc0534908d8f53 100644 (file)
@@ -653,12 +653,11 @@ static struct configfs_subsystem netconsole_subsys = {
 
 /* Handle network interface device notifications */
 static int netconsole_netdev_event(struct notifier_block *this,
-                                  unsigned long event,
-                                  void *ptr)
+                                  unsigned long event, void *ptr)
 {
        unsigned long flags;
        struct netconsole_target *nt;
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        bool stopped = false;
 
        if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
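
This netconsole hunk, like the macvlan, macvtap and pppoe ones in this merge, is part of a tree-wide conversion of netdevice notifiers: the void *ptr argument now carries a struct netdev_notifier_info rather than the net_device itself, and callbacks unwrap it with netdev_notifier_info_to_dev(). A minimal sketch of a notifier written against the new calling convention (the module and its NETDEV_UP handling are illustrative only):

#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        /* ptr is a struct netdev_notifier_info *, not the device */
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UP)
                netdev_info(dev, "link came up\n");

        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
        .notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
        return register_netdevice_notifier(&example_netdev_notifier);
}

static void __exit example_exit(void)
{
        unregister_netdevice_notifier(&example_netdev_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
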
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
new file mode 100644 (file)
index 0000000..dc364be
--- /dev/null
@@ -0,0 +1,170 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <net/net_namespace.h>
+#include <linux/if_arp.h>
+
+struct pcpu_lstats {
+       u64 packets;
+       u64 bytes;
+       struct u64_stats_sync syncp;
+};
+
+static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       int len = skb->len;
+       struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
+
+       u64_stats_update_begin(&stats->syncp);
+       stats->bytes += len;
+       stats->packets++;
+       u64_stats_update_end(&stats->syncp);
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static int nlmon_is_valid_mtu(int new_mtu)
+{
+       return new_mtu >= sizeof(struct nlmsghdr) && new_mtu <= INT_MAX;
+}
+
+static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if (!nlmon_is_valid_mtu(new_mtu))
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+static int nlmon_dev_init(struct net_device *dev)
+{
+       dev->lstats = alloc_percpu(struct pcpu_lstats);
+
+       return dev->lstats == NULL ? -ENOMEM : 0;
+}
+
+static void nlmon_dev_uninit(struct net_device *dev)
+{
+       free_percpu(dev->lstats);
+}
+
+static struct netlink_tap nlmon_tap;
+
+static int nlmon_open(struct net_device *dev)
+{
+       return netlink_add_tap(&nlmon_tap);
+}
+
+static int nlmon_close(struct net_device *dev)
+{
+       return netlink_remove_tap(&nlmon_tap);
+}
+
+static struct rtnl_link_stats64 *
+nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       int i;
+       u64 bytes = 0, packets = 0;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_lstats *nl_stats;
+               u64 tbytes, tpackets;
+               unsigned int start;
+
+               nl_stats = per_cpu_ptr(dev->lstats, i);
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&nl_stats->syncp);
+                       tbytes = nl_stats->bytes;
+                       tpackets = nl_stats->packets;
+               } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start));
+
+               packets += tpackets;
+               bytes += tbytes;
+       }
+
+       stats->rx_packets = packets;
+       stats->tx_packets = 0;
+
+       stats->rx_bytes = bytes;
+       stats->tx_bytes = 0;
+
+       return stats;
+}
+
+static u32 always_on(struct net_device *dev)
+{
+       return 1;
+}
+
+static const struct ethtool_ops nlmon_ethtool_ops = {
+       .get_link = always_on,
+};
+
+static const struct net_device_ops nlmon_ops = {
+       .ndo_init = nlmon_dev_init,
+       .ndo_uninit = nlmon_dev_uninit,
+       .ndo_open = nlmon_open,
+       .ndo_stop = nlmon_close,
+       .ndo_start_xmit = nlmon_xmit,
+       .ndo_get_stats64 = nlmon_get_stats64,
+       .ndo_change_mtu = nlmon_change_mtu,
+};
+
+static struct netlink_tap nlmon_tap __read_mostly = {
+       .module = THIS_MODULE,
+};
+
+static void nlmon_setup(struct net_device *dev)
+{
+       dev->type = ARPHRD_NETLINK;
+       dev->tx_queue_len = 0;
+
+       dev->netdev_ops = &nlmon_ops;
+       dev->ethtool_ops = &nlmon_ethtool_ops;
+       dev->destructor = free_netdev;
+
+       dev->features = NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+       dev->flags = IFF_NOARP;
+
+       /* That's rather a softlimit here, which, of course,
+        * can be altered. Not a real MTU, but what is to be
+        * expected in most cases.
+        */
+       dev->mtu = NLMSG_GOODSIZE;
+}
+
+static __init int nlmon_register(void)
+{
+       int err;
+       struct net_device *nldev;
+
+       nldev = nlmon_tap.dev = alloc_netdev(0, "netlink", nlmon_setup);
+       if (unlikely(nldev == NULL))
+               return -ENOMEM;
+
+       err = register_netdev(nldev);
+       if (unlikely(err))
+               free_netdev(nldev);
+
+       return err;
+}
+
+static __exit void nlmon_unregister(void)
+{
+       struct net_device *nldev = nlmon_tap.dev;
+
+       unregister_netdev(nldev);
+}
+
+module_init(nlmon_register);
+module_exit(nlmon_unregister);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
+MODULE_AUTHOR("Mathieu Geli <geli@enseirb.fr>");
+MODULE_DESCRIPTION("Netlink monitoring device");
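
nlmon hooks a netlink_tap into the netlink core when the interface is brought up and replays every netlink message as a frame on an ARPHRD_NETLINK device, so ordinary packet-capture tooling can see it. A hedged userspace sketch using a raw AF_PACKET socket follows; the interface name "netlink" matches the alloc_netdev() template above but is an assumption about the running system, and the interface must be administratively up so nlmon_open() has registered the tap:

#include <arpa/inet.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
        unsigned char buf[8192];
        struct sockaddr_ll sll;
        ssize_t len;
        int fd;

        fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);
        sll.sll_ifindex = if_nametoindex("netlink");    /* nlmon interface */
        if (!sll.sll_ifindex ||
            bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
                perror("bind");
                return 1;
        }

        /* each frame is a raw netlink message, starting with struct nlmsghdr */
        while ((len = recv(fd, buf, sizeof(buf), 0)) > 0)
                printf("captured %zd bytes of netlink traffic\n", len);

        close(fd);
        return 0;
}
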
index ed947dd76fbd1ded245f3e649dcf920cbe86320f..f3cdf64997d60dd6937ba8885dab3c72525b1f2b 100644 (file)
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
        if (dev == NULL)
                return;
 
+       list_del(&dev->list);
+
        ndev = dev->ndev;
 
        unregister_netdev(ndev);
index 1e11f2bfd9cef59bbe1f1e563c28918c6ecc7f16..3a316b30089f9139384ef35b6424ccc6bd8fbe0a 100644 (file)
@@ -144,6 +144,16 @@ config MDIO_OCTEON
 
          If in doubt, say Y.
 
+config MDIO_SUN4I
+       tristate "Allwinner sun4i MDIO interface support"
+       depends on ARCH_SUNXI
+       select REGULATOR
+       select REGULATOR_FIXED_VOLTAGE
+       help
+         This driver supports the MDIO interface found in the network
+         interface units of Allwinner SoCs that have an EMAC (A10,
+         A12, A10s, etc.)
+
 config MDIO_BUS_MUX
        tristate
        depends on OF_MDIO
index 9645e389a58d34fbdb0c74a737d61b33a4885b6a..23a2ab2e847e54d0d5b7ace4e4002f6c836c49e4 100644 (file)
@@ -30,3 +30,4 @@ obj-$(CONFIG_AMD_PHY)         += amd.o
 obj-$(CONFIG_MDIO_BUS_MUX)     += mdio-mux.o
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)        += mdio-mux-gpio.o
 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_SUN4I)       += mdio-sun4i.o
index 45cbc10de01cddb802153c7c00fcab6f72486db9..1f7091b3c27ca5e972425602f7e56ce45c5d92a2 100644 (file)
 #define AT803X_MMD_ACCESS_CONTROL              0x0D
 #define AT803X_MMD_ACCESS_CONTROL_DATA         0x0E
 #define AT803X_FUNC_DATA                       0x4003
+#define AT803X_DEBUG_ADDR                      0x1D
+#define AT803X_DEBUG_DATA                      0x1E
+#define AT803X_DEBUG_SYSTEM_MODE_CTRL          0x05
+#define AT803X_DEBUG_RGMII_TX_CLK_DLY          BIT(8)
 
 MODULE_DESCRIPTION("Atheros 803x PHY driver");
 MODULE_AUTHOR("Matus Ujhelyi");
 MODULE_LICENSE("GPL");
 
-static void at803x_set_wol_mac_addr(struct phy_device *phydev)
+static int at803x_set_wol(struct phy_device *phydev,
+                         struct ethtool_wolinfo *wol)
 {
        struct net_device *ndev = phydev->attached_dev;
        const u8 *mac;
+       int ret;
+       u32 value;
        unsigned int i, offsets[] = {
                AT803X_LOC_MAC_ADDR_32_47_OFFSET,
                AT803X_LOC_MAC_ADDR_16_31_OFFSET,
@@ -43,30 +50,61 @@ static void at803x_set_wol_mac_addr(struct phy_device *phydev)
        };
 
        if (!ndev)
-               return;
+               return -ENODEV;
 
-       mac = (const u8 *) ndev->dev_addr;
+       if (wol->wolopts & WAKE_MAGIC) {
+               mac = (const u8 *) ndev->dev_addr;
 
-       if (!is_valid_ether_addr(mac))
-               return;
+               if (!is_valid_ether_addr(mac))
+                       return -EFAULT;
 
-       for (i = 0; i < 3; i++) {
-               phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
+               for (i = 0; i < 3; i++) {
+                       phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
                                  AT803X_DEVICE_ADDR);
-               phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
+                       phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
                                  offsets[i]);
-               phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
+                       phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
                                  AT803X_FUNC_DATA);
-               phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
+                       phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
                                  mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+               }
+
+               value = phy_read(phydev, AT803X_INTR_ENABLE);
+               value |= AT803X_WOL_ENABLE;
+               ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
+               if (ret)
+                       return ret;
+               value = phy_read(phydev, AT803X_INTR_STATUS);
+       } else {
+               value = phy_read(phydev, AT803X_INTR_ENABLE);
+               value &= (~AT803X_WOL_ENABLE);
+               ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
+               if (ret)
+                       return ret;
+               value = phy_read(phydev, AT803X_INTR_STATUS);
        }
+
+       return ret;
+}
+
+static void at803x_get_wol(struct phy_device *phydev,
+                          struct ethtool_wolinfo *wol)
+{
+       u32 value;
+
+       wol->supported = WAKE_MAGIC;
+       wol->wolopts = 0;
+
+       value = phy_read(phydev, AT803X_INTR_ENABLE);
+       if (value & AT803X_WOL_ENABLE)
+               wol->wolopts |= WAKE_MAGIC;
 }
 
 static int at803x_config_init(struct phy_device *phydev)
 {
        int val;
+       int ret;
        u32 features;
-       int status;
 
        features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
                   SUPPORTED_FIBRE | SUPPORTED_BNC;
@@ -100,20 +138,29 @@ static int at803x_config_init(struct phy_device *phydev)
        phydev->supported = features;
        phydev->advertising = features;
 
-       /* enable WOL */
-       at803x_set_wol_mac_addr(phydev);
-       status = phy_write(phydev, AT803X_INTR_ENABLE, AT803X_WOL_ENABLE);
-       status = phy_read(phydev, AT803X_INTR_STATUS);
+       if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+               ret = phy_write(phydev, AT803X_DEBUG_ADDR,
+                               AT803X_DEBUG_SYSTEM_MODE_CTRL);
+               if (ret)
+                       return ret;
+               ret = phy_write(phydev, AT803X_DEBUG_DATA,
+                               AT803X_DEBUG_RGMII_TX_CLK_DLY);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
 
-/* ATHEROS 8035 */
-static struct phy_driver at8035_driver = {
+static struct phy_driver at803x_driver[] = {
+{
+       /* ATHEROS 8035 */
        .phy_id         = 0x004dd072,
        .name           = "Atheros 8035 ethernet",
        .phy_id_mask    = 0xffffffef,
        .config_init    = at803x_config_init,
+       .set_wol        = at803x_set_wol,
+       .get_wol        = at803x_get_wol,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .config_aneg    = &genphy_config_aneg,
@@ -121,14 +168,14 @@ static struct phy_driver at8035_driver = {
        .driver         = {
                .owner = THIS_MODULE,
        },
-};
-
-/* ATHEROS 8030 */
-static struct phy_driver at8030_driver = {
+}, {
+       /* ATHEROS 8030 */
        .phy_id         = 0x004dd076,
        .name           = "Atheros 8030 ethernet",
        .phy_id_mask    = 0xffffffef,
        .config_init    = at803x_config_init,
+       .set_wol        = at803x_set_wol,
+       .get_wol        = at803x_get_wol,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .config_aneg    = &genphy_config_aneg,
@@ -136,32 +183,33 @@ static struct phy_driver at8030_driver = {
        .driver         = {
                .owner = THIS_MODULE,
        },
-};
+}, {
+       /* ATHEROS 8031 */
+       .phy_id         = 0x004dd074,
+       .name           = "Atheros 8031 ethernet",
+       .phy_id_mask    = 0xffffffef,
+       .config_init    = at803x_config_init,
+       .set_wol        = at803x_set_wol,
+       .get_wol        = at803x_get_wol,
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+       .config_aneg    = &genphy_config_aneg,
+       .read_status    = &genphy_read_status,
+       .driver         = {
+               .owner = THIS_MODULE,
+       },
+} };
 
 static int __init atheros_init(void)
 {
-       int ret;
-
-       ret = phy_driver_register(&at8035_driver);
-       if (ret)
-               goto err1;
-
-       ret = phy_driver_register(&at8030_driver);
-       if (ret)
-               goto err2;
-
-       return 0;
-
-err2:
-       phy_driver_unregister(&at8035_driver);
-err1:
-       return ret;
+       return phy_drivers_register(at803x_driver,
+                                   ARRAY_SIZE(at803x_driver));
 }
 
 static void __exit atheros_exit(void)
 {
-       phy_driver_unregister(&at8035_driver);
-       phy_driver_unregister(&at8030_driver);
+       return phy_drivers_unregister(at803x_driver,
+                                     ARRAY_SIZE(at803x_driver));
 }
 
 module_init(atheros_init);
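
The at803x conversion above replaces the unconditional WoL setup in config_init with proper .get_wol/.set_wol callbacks, so wake-on-LAN is now opted into through the normal ethtool path. A minimal userspace sketch via the SIOCETHTOOL ioctl; the interface name is a placeholder, the call only reaches at803x_set_wol() when the MAC driver forwards WoL requests to phylib, and "ethtool -s <dev> wol g" is the usual shortcut:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_wolinfo wol = {
                .cmd = ETHTOOL_SWOL,
                .wolopts = WAKE_MAGIC,  /* magic-packet wakeup, as above */
        };
        struct ifreq ifr;
        int fd;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder name */
        ifr.ifr_data = (char *)&wol;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("SIOCETHTOOL");

        close(fd);
        return 0;
}
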
index 84c7a39b1c659a91a82cb8c82d1436e0f994cc5b..ac55b08078534e8170416630dced41dba150564c 100644 (file)
@@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .name           = "Broadcom BCM63XX (1)",
        /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
-       .flags          = PHY_HAS_INTERRUPT,
+       .flags          = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -91,7 +91,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .phy_id_mask    = 0xfffffc00,
        .name           = "Broadcom BCM63XX (2)",
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
-       .flags          = PHY_HAS_INTERRUPT,
+       .flags          = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
index 202fe1ff1987950effe6e0a0426894a0ad8dea78..2e91477362d4d70b15df3db59ad41dec990a3c57 100644 (file)
 #define MII_M1011_PHY_STATUS_RESOLVED  0x0800
 #define MII_M1011_PHY_STATUS_LINK      0x0400
 
+#define MII_M1116R_CONTROL_REG_MAC     21
+
 
 MODULE_DESCRIPTION("Marvell PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -372,6 +374,66 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
        return m88e1121_config_aneg(phydev);
 }
 
+static int m88e1510_config_aneg(struct phy_device *phydev)
+{
+       int err;
+
+       err = m88e1318_config_aneg(phydev);
+       if (err < 0)
+               return err;
+
+       return marvell_of_reg_init(phydev);
+}
+
+static int m88e1116r_config_init(struct phy_device *phydev)
+{
+       int temp;
+       int err;
+
+       temp = phy_read(phydev, MII_BMCR);
+       temp |= BMCR_RESET;
+       err = phy_write(phydev, MII_BMCR, temp);
+       if (err < 0)
+               return err;
+
+       mdelay(500);
+
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+       if (err < 0)
+               return err;
+
+       temp = phy_read(phydev, MII_M1011_PHY_SCR);
+       temp |= (7 << 12);      /* max number of gigabit attempts */
+       temp |= (1 << 11);      /* enable downshift */
+       temp |= MII_M1011_PHY_SCR_AUTO_CROSS;
+       err = phy_write(phydev, MII_M1011_PHY_SCR, temp);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 2);
+       if (err < 0)
+               return err;
+       temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC);
+       temp |= (1 << 5);
+       temp |= (1 << 4);
+       err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp);
+       if (err < 0)
+               return err;
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+       if (err < 0)
+               return err;
+
+       temp = phy_read(phydev, MII_BMCR);
+       temp |= BMCR_RESET;
+       err = phy_write(phydev, MII_BMCR, temp);
+       if (err < 0)
+               return err;
+
+       mdelay(500);
+
+       return 0;
+}
+
 static int m88e1111_config_init(struct phy_device *phydev)
 {
        int err;
@@ -940,6 +1002,32 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .driver = { .owner = THIS_MODULE },
        },
+       {
+               .phy_id = MARVELL_PHY_ID_88E1116R,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E1116R",
+               .features = PHY_GBIT_FEATURES,
+               .flags = PHY_HAS_INTERRUPT,
+               .config_init = &m88e1116r_config_init,
+               .config_aneg = &genphy_config_aneg,
+               .read_status = &genphy_read_status,
+               .ack_interrupt = &marvell_ack_interrupt,
+               .config_intr = &marvell_config_intr,
+               .driver = { .owner = THIS_MODULE },
+       },
+       {
+               .phy_id = MARVELL_PHY_ID_88E1510,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E1510",
+               .features = PHY_GBIT_FEATURES,
+               .flags = PHY_HAS_INTERRUPT,
+               .config_aneg = &m88e1510_config_aneg,
+               .read_status = &marvell_read_status,
+               .ack_interrupt = &marvell_ack_interrupt,
+               .config_intr = &marvell_config_intr,
+               .did_interrupt = &m88e1121_did_interrupt,
+               .driver = { .owner = THIS_MODULE },
+       },
 };
 
 static int __init marvell_init(void)
@@ -958,15 +1046,17 @@ module_init(marvell_init);
 module_exit(marvell_exit);
 
 static struct mdio_device_id __maybe_unused marvell_tbl[] = {
-       { 0x01410c60, 0xfffffff0 },
-       { 0x01410c90, 0xfffffff0 },
-       { 0x01410cc0, 0xfffffff0 },
-       { 0x01410e10, 0xfffffff0 },
-       { 0x01410cb0, 0xfffffff0 },
-       { 0x01410cd0, 0xfffffff0 },
-       { 0x01410e50, 0xfffffff0 },
-       { 0x01410e30, 0xfffffff0 },
-       { 0x01410e90, 0xfffffff0 },
+       { MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1121R, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1145, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1149R, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1240, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
        { }
 };
 
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
new file mode 100644 (file)
index 0000000..61d3f4e
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Allwinner EMAC MDIO interface driver
+ *
+ * Copyright 2012-2013 Stefan Roese <sr@denx.de>
+ * Copyright 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on the Linux driver provided by Allwinner:
+ * Copyright (C) 1997  Sten Wang
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define EMAC_MAC_MCMD_REG      (0x00)
+#define EMAC_MAC_MADR_REG      (0x04)
+#define EMAC_MAC_MWTD_REG      (0x08)
+#define EMAC_MAC_MRDD_REG      (0x0c)
+#define EMAC_MAC_MIND_REG      (0x10)
+#define EMAC_MAC_SSRR_REG      (0x14)
+
+#define MDIO_TIMEOUT           (msecs_to_jiffies(100))
+
+struct sun4i_mdio_data {
+       void __iomem            *membase;
+       struct regulator        *regulator;
+};
+
+static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+       struct sun4i_mdio_data *data = bus->priv;
+       unsigned long start_jiffies;
+       int value;
+
+       /* issue the phy address and reg */
+       writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
+       /* pull up the phy io line */
+       writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
+
+       /* Wait read complete */
+       start_jiffies = jiffies;
+       while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
+               if (time_after(jiffies,
+                              start_jiffies + MDIO_TIMEOUT))
+                       return -ETIMEDOUT;
+               msleep(1);
+       }
+
+       /* push down the phy io line */
+       writel(0x0, data->membase + EMAC_MAC_MCMD_REG);
+       /* and read data */
+       value = readl(data->membase + EMAC_MAC_MRDD_REG);
+
+       return value;
+}
+
+static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+                           u16 value)
+{
+       struct sun4i_mdio_data *data = bus->priv;
+       unsigned long start_jiffies;
+
+       /* issue the phy address and reg */
+       writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
+       /* pull up the phy io line */
+       writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
+
+       /* Wait read complete */
+       start_jiffies = jiffies;
+       while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
+               if (time_after(jiffies,
+                              start_jiffies + MDIO_TIMEOUT))
+                       return -ETIMEDOUT;
+               msleep(1);
+       }
+
+       /* push down the phy io line */
+       writel(0x0, data->membase + EMAC_MAC_MCMD_REG);
+       /* and write data */
+       writel(value, data->membase + EMAC_MAC_MWTD_REG);
+
+       return 0;
+}
+
+static int sun4i_mdio_reset(struct mii_bus *bus)
+{
+       return 0;
+}
+
+static int sun4i_mdio_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct mii_bus *bus;
+       struct sun4i_mdio_data *data;
+       int ret, i;
+
+       bus = mdiobus_alloc_size(sizeof(*data));
+       if (!bus)
+               return -ENOMEM;
+
+       bus->name = "sun4i_mii_bus";
+       bus->read = &sun4i_mdio_read;
+       bus->write = &sun4i_mdio_write;
+       bus->reset = &sun4i_mdio_reset;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+       bus->parent = &pdev->dev;
+
+       bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       if (!bus->irq) {
+               ret = -ENOMEM;
+               goto err_out_free_mdiobus;
+       }
+
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               bus->irq[i] = PHY_POLL;
+
+       data = bus->priv;
+       data->membase = of_iomap(np, 0);
+       if (!data->membase) {
+               ret = -ENOMEM;
+               goto err_out_free_mdio_irq;
+       }
+
+       data->regulator = devm_regulator_get(&pdev->dev, "phy");
+       if (IS_ERR(data->regulator)) {
+               if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               dev_info(&pdev->dev, "no regulator found\n");
+       } else {
+               ret = regulator_enable(data->regulator);
+               if (ret)
+                       goto err_out_free_mdio_irq;
+       }
+
+       ret = of_mdiobus_register(bus, np);
+       if (ret < 0)
+               goto err_out_disable_regulator;
+
+       platform_set_drvdata(pdev, bus);
+
+       return 0;
+
+err_out_disable_regulator:
+       regulator_disable(data->regulator);
+err_out_free_mdio_irq:
+       kfree(bus->irq);
+err_out_free_mdiobus:
+       mdiobus_free(bus);
+       return ret;
+}
+
+static int sun4i_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus = platform_get_drvdata(pdev);
+
+       mdiobus_unregister(bus);
+       kfree(bus->irq);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+static const struct of_device_id sun4i_mdio_dt_ids[] = {
+       { .compatible = "allwinner,sun4i-mdio" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids);
+
+static struct platform_driver sun4i_mdio_driver = {
+       .probe = sun4i_mdio_probe,
+       .remove = sun4i_mdio_remove,
+       .driver = {
+               .name = "sun4i-mdio",
+               .of_match_table = sun4i_mdio_dt_ids,
+       },
+};
+
+module_platform_driver(sun4i_mdio_driver);
+
+MODULE_DESCRIPTION("Allwinner EMAC MDIO interface driver");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_LICENSE("GPL");
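
The MDIO read and write paths above poll the busy bit in EMAC_MAC_MIND_REG under a jiffies deadline. The usual shape of that idiom, with the register access replaced by a hypothetical ready() predicate, is roughly:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll ready() for up to ~100ms; ready() stands in for a readl() of the
 * hardware busy flag.
 */
static int wait_until_ready(bool (*ready)(void))
{
        unsigned long deadline = jiffies + msecs_to_jiffies(100);

        while (!ready()) {
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;
                usleep_range(1000, 2000);
        }
        return 0;
}

The deadline is captured once before the loop and then compared against the live jiffies counter on every iteration, which is what the time_after() checks in sun4i_mdio_read() and sun4i_mdio_write() rely on.
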
index c14f14741b3f7a8c4a6634d2f8b4b0b9c254d5df..10d058ab4f7969b50a0f8b378ac360e263b2104e 100644 (file)
@@ -294,7 +294,8 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
        cmd->duplex = phydev->duplex;
        cmd->port = PORT_MII;
        cmd->phy_address = phydev->addr;
-       cmd->transceiver = XCVR_EXTERNAL;
+       cmd->transceiver = phy_is_internal(phydev) ?
+               XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = phydev->autoneg;
 
        return 0;
@@ -419,8 +420,6 @@ out_unlock:
 EXPORT_SYMBOL(phy_start_aneg);
 
 
-static void phy_change(struct work_struct *work);
-
 /**
  * phy_start_machine - start PHY state machine tracking
  * @phydev: the phy_device struct
@@ -565,8 +564,6 @@ int phy_start_interrupts(struct phy_device *phydev)
 {
        int err = 0;
 
-       INIT_WORK(&phydev->phy_queue, phy_change);
-
        atomic_set(&phydev->irq_disable, 0);
        if (request_irq(phydev->irq, phy_interrupt,
                                IRQF_SHARED,
@@ -623,7 +620,7 @@ EXPORT_SYMBOL(phy_stop_interrupts);
  * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
  * @work: work_struct that describes the work to be done
  */
-static void phy_change(struct work_struct *work)
+void phy_change(struct work_struct *work)
 {
        int err;
        struct phy_device *phydev =
@@ -682,7 +679,7 @@ void phy_stop(struct phy_device *phydev)
        if (PHY_HALTED == phydev->state)
                goto out_unlock;
 
-       if (phydev->irq != PHY_POLL) {
+       if (phy_interrupt_is_valid(phydev)) {
                /* Disable PHY Interrupts */
                phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
 
@@ -828,8 +825,9 @@ void phy_state_machine(struct work_struct *work)
                        break;
                case PHY_RUNNING:
                        /* Only register a CHANGE if we are
-                        * polling */
-                       if (PHY_POLL == phydev->irq)
+                        * polling or ignoring interrupts
+                        */
+                       if (!phy_interrupt_is_valid(phydev))
                                phydev->state = PHY_CHANGELINK;
                        break;
                case PHY_CHANGELINK:
@@ -848,7 +846,7 @@ void phy_state_machine(struct work_struct *work)
 
                        phydev->adjust_link(phydev->attached_dev);
 
-                       if (PHY_POLL != phydev->irq)
+                       if (phy_interrupt_is_valid(phydev))
                                err = phy_config_interrupt(phydev,
                                                PHY_INTERRUPT_ENABLED);
                        break;
@@ -921,6 +919,14 @@ void phy_state_machine(struct work_struct *work)
        schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
 }
 
+void phy_mac_interrupt(struct phy_device *phydev, int new_link)
+{
+       cancel_work_sync(&phydev->phy_queue);
+       phydev->link = new_link;
+       schedule_work(&phydev->phy_queue);
+}
+EXPORT_SYMBOL(phy_mac_interrupt);
+
 static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
                                    int addr)
 {
@@ -1044,7 +1050,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
-               if ((lp & adv & settings[idx].setting))
+               if (!(lp & adv & settings[idx].setting))
                        goto eee_exit;
 
                if (clk_stop_enable) {
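
With phy_change() no longer static and the work struct initialized at device-creation time (see the phy_device.c hunk below), the new phy_mac_interrupt() helper lets a MAC whose own interrupt reports link transitions drive the phylib state machine without polling. A hedged sketch of how a MAC driver might use it; the register, bit and structure names are illustrative, and the handler is the threaded half of a request_threaded_irq() pair because phy_mac_interrupt() calls cancel_work_sync() and so must not run in hard-IRQ context:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/phy.h>

/* hypothetical link-status register/bit in the MAC */
#define MY_MAC_STATUS           0x10
#define MY_MAC_STATUS_LINK      BIT(0)

struct my_mac_priv {
        struct phy_device *phydev;      /* from phy_connect() at probe time */
        void __iomem *regs;
};

static irqreturn_t my_mac_link_irq_thread(int irq, void *dev_id)
{
        struct my_mac_priv *priv = dev_id;
        int link = !!(readl(priv->regs + MY_MAC_STATUS) & MY_MAC_STATUS_LINK);

        /* hand the new link state to phylib; it schedules phy_change() */
        phy_mac_interrupt(priv->phydev, link);

        return IRQ_HANDLED;
}
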
index 3657b4a29124b57bc335417a1389f792aaf2ce79..74630e94fa3bc323b9528b691514d7456b9abd6d 100644 (file)
@@ -189,6 +189,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 
        mutex_init(&dev->lock);
        INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+       INIT_WORK(&dev->phy_queue, phy_change);
 
        /* Request the appropriate module unconditionally; don't
           bother trying to do so only if it isn't already loaded,
@@ -1009,10 +1010,16 @@ static int phy_probe(struct device *dev)
        phydrv = to_phy_driver(drv);
        phydev->drv = phydrv;
 
-       /* Disable the interrupt if the PHY doesn't support it */
-       if (!(phydrv->flags & PHY_HAS_INTERRUPT))
+       /* Disable the interrupt if the PHY doesn't support it
+        * but the interrupt is still a valid one
+        */
+       if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
+                       phy_interrupt_is_valid(phydev))
                phydev->irq = PHY_POLL;
 
+       if (phydrv->flags & PHY_IS_INTERNAL)
+               phydev->is_internal = true;
+
        mutex_lock(&phydev->lock);
 
        /* Start out supporting everything. Eventually,
index d11c93e69e03fafac4efee44cad6c47e55380dda..f3bea1346021273b85cfc8d5016e3def067eec46 100644 (file)
@@ -354,19 +354,7 @@ static struct spi_driver ks8995_driver = {
        .remove   = ks8995_remove,
 };
 
-static int __init ks8995_init(void)
-{
-       pr_info(DRV_DESC " version " DRV_VERSION "\n");
-
-       return spi_register_driver(&ks8995_driver);
-}
-module_init(ks8995_init);
-
-static void __exit ks8995_exit(void)
-{
-       spi_unregister_driver(&ks8995_driver);
-}
-module_exit(ks8995_exit);
+module_spi_driver(ks8995_driver);
 
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_VERSION(DRV_VERSION);
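
module_spi_driver() expands to the same register/unregister boilerplate the deleted ks8995_init()/ks8995_exit() pair spelled out by hand, minus the version banner pr_info(). Roughly, for a hypothetical foo_driver:

static int __init foo_driver_init(void)
{
        return spi_register_driver(&foo_driver);
}
module_init(foo_driver_init);

static void __exit foo_driver_exit(void)
{
        spi_unregister_driver(&foo_driver);
}
module_exit(foo_driver_exit);
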
index 3492b53912731eae9407fe6dbb15468d35429263..69b482bce7d2449688fc67731cf923ce47ffcc32 100644 (file)
 #define MII_VSC8244_ISTAT_DUPLEX       0x1000
 
 /* Vitesse Auxiliary Control/Status Register */
-#define MII_VSC8244_AUX_CONSTAT                0x1c
-#define MII_VSC8244_AUXCONSTAT_INIT            0x0000
-#define MII_VSC8244_AUXCONSTAT_DUPLEX          0x0020
-#define MII_VSC8244_AUXCONSTAT_SPEED           0x0018
-#define MII_VSC8244_AUXCONSTAT_GBIT            0x0010
-#define MII_VSC8244_AUXCONSTAT_100             0x0008
+#define MII_VSC8244_AUX_CONSTAT                0x1c
+#define MII_VSC8244_AUXCONSTAT_INIT    0x0000
+#define MII_VSC8244_AUXCONSTAT_DUPLEX  0x0020
+#define MII_VSC8244_AUXCONSTAT_SPEED   0x0018
+#define MII_VSC8244_AUXCONSTAT_GBIT    0x0010
+#define MII_VSC8244_AUXCONSTAT_100     0x0008
 
 #define MII_VSC8221_AUXCONSTAT_INIT    0x0004 /* need to set this bit? */
 #define MII_VSC8221_AUXCONSTAT_RESERVED        0x0004
 
 #define PHY_ID_VSC8244                 0x000fc6c0
 #define PHY_ID_VSC8221                 0x000fc550
+#define PHY_ID_VSC8211                 0x000fc4b0
 
 MODULE_DESCRIPTION("Vitesse PHY driver");
 MODULE_AUTHOR("Kriston Carson");
@@ -100,9 +101,8 @@ static int vsc824x_config_init(struct phy_device *phydev)
 static int vsc824x_ack_interrupt(struct phy_device *phydev)
 {
        int err = 0;
-       
-       /*
-        * Don't bother to ACK the interrupts if interrupts
+
+       /* Don't bother to ACK the interrupts if interrupts
         * are disabled.  The 824x cannot clear the interrupts
         * if they are disabled.
         */
@@ -122,8 +122,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
                                MII_VSC8244_IMASK_MASK :
                                MII_VSC8221_IMASK_MASK);
        else {
-               /*
-                * The Vitesse PHY cannot clear the interrupt
+               /* The Vitesse PHY cannot clear the interrupt
                 * once it has disabled them, so we clear them first
                 */
                err = phy_read(phydev, MII_VSC8244_ISTAT);
@@ -146,7 +145,8 @@ static int vsc8221_config_init(struct phy_device *phydev)
        return err;
 
        /* Perhaps we should set EXT_CON1 based on the interface?
-          Options are 802.3Z SerDes or SGMII */
+        * Options are 802.3Z SerDes or SGMII
+        */
 }
 
 /* Vitesse 824x */
@@ -176,6 +176,19 @@ static struct phy_driver vsc82xx_driver[] = {
        .ack_interrupt  = &vsc824x_ack_interrupt,
        .config_intr    = &vsc82xx_config_intr,
        .driver         = { .owner = THIS_MODULE,},
+}, {
+       /* Vitesse 8211 */
+       .phy_id         = PHY_ID_VSC8211,
+       .phy_id_mask    = 0x000ffff0,
+       .name           = "Vitesse VSC8211",
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+       .config_init    = &vsc8221_config_init,
+       .config_aneg    = &genphy_config_aneg,
+       .read_status    = &genphy_read_status,
+       .ack_interrupt  = &vsc824x_ack_interrupt,
+       .config_intr    = &vsc82xx_config_intr,
+       .driver         = { .owner = THIS_MODULE,},
 } };
 
 static int __init vsc82xx_init(void)
@@ -196,6 +209,7 @@ module_exit(vsc82xx_exit);
 static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
        { PHY_ID_VSC8244, 0x000fffc0 },
        { PHY_ID_VSC8221, 0x000ffff0 },
+       { PHY_ID_VSC8211, 0x000ffff0 },
        { }
 };
 
index bb07ba94c3aaedb8949c310d340c18ae1b9ab284..5f66e30d98239651283ec200cbf3c842e60eb2ae 100644 (file)
@@ -338,7 +338,7 @@ static void pppoe_flush_dev(struct net_device *dev)
 static int pppoe_device_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        /* Only look at sockets that are using this specific device. */
        switch (event) {
index 7c43261975bd1d57e93a4b35382e42c92891cfb1..bff7e0b0b4e70d10ccdf9a6de11cdbae056a64f0 100644 (file)
@@ -525,31 +525,26 @@ static void team_set_no_mode(struct team *team)
        team->mode = &__team_no_mode;
 }
 
-static void __team_adjust_ops(struct team *team, int en_port_count)
+static void team_adjust_ops(struct team *team)
 {
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */
 
-       if (!en_port_count || !team_is_mode_set(team) ||
+       if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;
 
-       if (!en_port_count || !team_is_mode_set(team) ||
+       if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
 }
 
-static void team_adjust_ops(struct team *team)
-{
-       __team_adjust_ops(team, team->en_port_count);
-}
-
 /*
  * We can benefit from the fact that it's ensured no port is present
  * at the time of mode change. Therefore no packets are in fly so there's no
@@ -725,9 +720,9 @@ static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
 static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
 {
+       if (!port->queue_id)
+               return;
        list_del_rcu(&port->qom_list);
-       synchronize_rcu();
-       INIT_LIST_HEAD(&port->qom_list);
 }
 
 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
@@ -749,9 +744,8 @@ static void __team_queue_override_port_add(struct team *team,
        struct list_head *qom_list;
        struct list_head *node;
 
-       if (!port->queue_id || !team_port_enabled(port))
+       if (!port->queue_id)
                return;
-
        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
@@ -768,7 +762,7 @@ static void __team_queue_override_enabled_check(struct team *team)
        bool enabled = false;
 
        list_for_each_entry(port, &team->port_list, list) {
-               if (!list_empty(&port->qom_list)) {
+               if (port->queue_id) {
                        enabled = true;
                        break;
                }
@@ -780,14 +774,44 @@ static void __team_queue_override_enabled_check(struct team *team)
        team->queue_override_enabled = enabled;
 }
 
-static void team_queue_override_port_refresh(struct team *team,
-                                            struct team_port *port)
+static void team_queue_override_port_prio_changed(struct team *team,
+                                                 struct team_port *port)
 {
+       if (!port->queue_id || team_port_enabled(port))
+               return;
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
 }
 
+static void team_queue_override_port_change_queue_id(struct team *team,
+                                                    struct team_port *port,
+                                                    u16 new_queue_id)
+{
+       if (team_port_enabled(port)) {
+               __team_queue_override_port_del(team, port);
+               port->queue_id = new_queue_id;
+               __team_queue_override_port_add(team, port);
+               __team_queue_override_enabled_check(team);
+       } else {
+               port->queue_id = new_queue_id;
+       }
+}
+
+static void team_queue_override_port_add(struct team *team,
+                                        struct team_port *port)
+{
+       __team_queue_override_port_add(team, port);
+       __team_queue_override_enabled_check(team);
+}
+
+static void team_queue_override_port_del(struct team *team,
+                                        struct team_port *port)
+{
+       __team_queue_override_port_del(team, port);
+       __team_queue_override_enabled_check(team);
+}
+
 
 /****************
  * Port handling
@@ -819,7 +843,7 @@ static void team_port_enable(struct team *team,
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
-       team_queue_override_port_refresh(team, port);
+       team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
 }
@@ -848,14 +872,9 @@ static void team_port_disable(struct team *team,
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
-       team_queue_override_port_refresh(team, port);
-       __team_adjust_ops(team, team->en_port_count - 1);
-       /*
-        * Wait until readers see adjusted ops. This ensures that
-        * readers never see team->en_port_count == 0
-        */
-       synchronize_rcu();
        team->en_port_count--;
+       team_queue_override_port_del(team, port);
+       team_adjust_ops(team);
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -1092,8 +1111,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
        }
 
        port->index = -1;
-       team_port_enable(team, port);
        list_add_tail_rcu(&port->list, &team->port_list);
+       team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
        __team_options_change_check(team);
@@ -1163,8 +1182,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
-       synchronize_rcu();
-       kfree(port);
+       kfree_rcu(port, rcu);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);
 
@@ -1259,9 +1277,12 @@ static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
 {
        struct team_port *port = ctx->info->port;
+       s32 priority = ctx->data.s32_val;
 
-       port->priority = ctx->data.s32_val;
-       team_queue_override_port_refresh(team, port);
+       if (port->priority == priority)
+               return 0;
+       port->priority = priority;
+       team_queue_override_port_prio_changed(team, port);
        return 0;
 }
 
@@ -1278,17 +1299,16 @@ static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
 {
        struct team_port *port = ctx->info->port;
+       u16 new_queue_id = ctx->data.u32_val;
 
-       if (port->queue_id == ctx->data.u32_val)
+       if (port->queue_id == new_queue_id)
                return 0;
-       if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+       if (new_queue_id >= team->dev->real_num_tx_queues)
                return -EINVAL;
-       port->queue_id = ctx->data.u32_val;
-       team_queue_override_port_refresh(team, port);
+       team_queue_override_port_change_queue_id(team, port, new_queue_id);
        return 0;
 }
 
-
 static const struct team_option team_options[] = {
        {
                .name = "mode",
@@ -2374,7 +2394,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
        bool incomplete;
        int i;
 
-       port = list_first_entry(&team->port_list, struct team_port, list);
+       port = list_first_entry_or_null(&team->port_list,
+                                       struct team_port, list);
 
 start_again:
        err = __send_and_alloc_skb(&skb, team, portid, send_func);
@@ -2402,8 +2423,8 @@ start_again:
                err = team_nl_fill_one_port_get(skb, one_port);
                if (err)
                        goto errout;
-       } else {
-               list_for_each_entry(port, &team->port_list, list) {
+       } else if (port) {
+               list_for_each_entry_from(port, &team->port_list, list) {
                        err = team_nl_fill_one_port_get(skb, port);
                        if (err) {
                                if (err == -EMSGSIZE) {
@@ -2647,7 +2668,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
 static int team_device_event(struct notifier_block *unused,
                             unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *) ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct team_port *port;
 
        port = team_port_get_rtnl(dev);
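The team changes above remove synchronize_rcu() from the port enable/disable and delete paths; the port structure is instead freed with kfree_rcu() and the transmit modes (next hunks) learn to tolerate a NULL port lookup. A minimal sketch of the kfree_rcu() pattern, assuming the object embeds a struct rcu_head named rcu, as the diff implies for struct team_port:

	struct example_port {
		struct list_head list;
		struct rcu_head rcu;
	};

	static void example_port_del(struct example_port *port)
	{
		list_del_rcu(&port->list);
		kfree_rcu(port, rcu);	/* freed only after a grace period */
	}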
index cdc31b5ea15eb21325b4fc1d3619288a0300e357..829a9cd2b4dac5257f2deb8eabd39c0210ffc8bc 100644 (file)
@@ -112,9 +112,8 @@ static struct team_port *lb_hash_select_tx_port(struct team *team,
                                                struct sk_buff *skb,
                                                unsigned char hash)
 {
-       int port_index;
+       int port_index = team_num_to_port_index(team, hash);
 
-       port_index = hash % team->en_port_count;
        return team_get_port_by_index_rcu(team, port_index);
 }
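team_num_to_port_index() is a helper from include/linux/if_team.h, not shown in this diff; presumably it reduces an arbitrary number to an index below en_port_count, roughly:

	/* presumed shape of the helper -- not part of this patch */
	static inline int team_num_to_port_index(struct team *team, int num)
	{
		return num % team->en_port_count;
	}

Callers in the random and round-robin hunks below still check the returned port for NULL, since the enabled-port count can change under RCU while a packet is in flight.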
 
index 5ca14d463ba7d897931b41b8c8f515a67ff9829c..7f032e2113437f1ca6b2952ba4ccc28e56dd20ba 100644 (file)
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
 
        port_index = random_N(team->en_port_count);
        port = team_get_port_by_index_rcu(team, port_index);
+       if (unlikely(!port))
+               goto drop;
        port = team_get_first_port_txable_rcu(team, port);
        if (unlikely(!port))
                goto drop;
index d268e4de781b46b3cf2e3364770098f43f48d760..53665850b59e280c171155690848ca5b5fcbb814 100644 (file)
@@ -30,8 +30,11 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
        struct team_port *port;
        int port_index;
 
-       port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
+       port_index = team_num_to_port_index(team,
+                                           rr_priv(team)->sent_packets++);
        port = team_get_port_by_index_rcu(team, port_index);
+       if (unlikely(!port))
+               goto drop;
        port = team_get_first_port_txable_rcu(team, port);
        if (unlikely(!port))
                goto drop;
index f042b0373e5ddec6a8a85703843b89a43cbeb1f7..cea2fe4e9812cfa528593eefc19e151f098bfdd1 100644 (file)
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
        u32 numqueues = 0;
 
        rcu_read_lock();
-       numqueues = tun->numqueues;
+       numqueues = ACCESS_ONCE(tun->numqueues);
 
        txq = skb_get_rxhash(skb);
        if (txq) {
@@ -841,7 +841,7 @@ static const struct net_device_ops tap_netdev_ops = {
 #endif
 };
 
-static int tun_flow_init(struct tun_struct *tun)
+static void tun_flow_init(struct tun_struct *tun)
 {
        int i;
 
@@ -852,8 +852,6 @@ static int tun_flow_init(struct tun_struct *tun)
        setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
        mod_timer(&tun->flow_gc_timer,
                  round_jiffies_up(jiffies + tun->ageing_time));
-
-       return 0;
 }
 
 static void tun_flow_uninit(struct tun_struct *tun)
@@ -1530,6 +1528,9 @@ static int tun_flags(struct tun_struct *tun)
        if (tun->flags & TUN_TAP_MQ)
                flags |= IFF_MULTI_QUEUE;
 
+       if (tun->flags & TUN_PERSIST)
+               flags |= IFF_PERSIST;
+
        return flags;
 }
 
@@ -1585,6 +1586,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                else
                        return -EINVAL;
 
+               if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
+                   !!(tun->flags & TUN_TAP_MQ))
+                       return -EINVAL;
+
                if (tun_not_capable(tun))
                        return -EPERM;
                err = security_tun_dev_open(tun->security);
@@ -1655,10 +1660,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        goto err_free_dev;
 
                tun_net_init(dev);
-
-               err = tun_flow_init(tun);
-               if (err < 0)
-                       goto err_free_dev;
+               tun_flow_init(tun);
 
                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
                        TUN_USER_FEATURES;
@@ -2155,6 +2157,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
        INIT_LIST_HEAD(&tfile->next);
 
+       sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+
        return 0;
 }
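Two small points in the tun hunks: tun_flow_init() can no longer fail, so its caller drops the error path, and tun_select_queue() now reads tun->numqueues exactly once through ACCESS_ONCE() so the value cannot change between the hash lookup and the modulo. A minimal illustration of that lockless read (names are illustrative):

	struct example_dev {
		unsigned int numqueues;	/* written under RTNL, read under RCU */
	};

	static u16 example_pick_queue(struct example_dev *d, u32 rxhash)
	{
		unsigned int numqueues = ACCESS_ONCE(d->numqueues);

		return numqueues ? rxhash % numqueues : 0;
	}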
 
index 287cc624b90be5335337fe56b9ea3cdf405563d0..d84bfd4109a455661ee5160bddbe20c6cc597d01 100644 (file)
@@ -67,7 +67,6 @@ config USB_KAWETH
 
 config USB_PEGASUS
        tristate "USB Pegasus/Pegasus-II based ethernet device support"
-       select NET_CORE
        select MII
        ---help---
          Say Y here if you know you have Pegasus or Pegasus-II based adapter.
@@ -83,7 +82,6 @@ config USB_PEGASUS
 
 config USB_RTL8150
        tristate "USB RTL8150 based ethernet device support"
-       select NET_CORE
        select MII
        help
          Say Y here if you have RTL8150 based usb-ethernet adapter.
@@ -95,7 +93,6 @@ config USB_RTL8150
 
 config USB_RTL8152
        tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
-       select NET_CORE
        select MII
        help
          This option adds support for Realtek RTL8152 based USB 2.0
@@ -106,7 +103,6 @@ config USB_RTL8152
 
 config USB_USBNET
        tristate "Multi-purpose USB Networking Framework"
-       select NET_CORE
        select MII
        ---help---
          This driver supports several kinds of network links over USB,
index 078795fe6e312f22348d381e03b07c4274e07f87..04ee044dde511badbe8117f45faed74ee92b93be 100644 (file)
@@ -627,6 +627,12 @@ static const struct usb_device_id  products [] = {
        .driver_info = 0,
 },
 
+/* Huawei E1820 - handled by qmi_wwan */
+{
+       USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
+       .driver_info = 0,
+},
+
 /* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
 #if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
 {
index 0192073e53a30723927558e624789874511847b0..6866eae3e388b76a1a688627580d5e9b29147c05 100644 (file)
@@ -221,12 +221,9 @@ done:
                memset(skb_put(skb, padlen), 0, padlen);
        }
 
-       netdev_dbg(
-               dev->net,
-               "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
-               content_len, padlen, header_start[0], header_start[1],
-               header_start[2], header_start[3], header_start[4],
-               header_start[5]);
+       netdev_dbg(dev->net,
+               "Sending package with length %i and padding %i. Header: %6phC.",
+               content_len, padlen, header_start);
 
        return skb;
 }
@@ -263,32 +260,23 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                                sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
                                header_start, EXPECTED_UNKNOWN_HEADER_2,
                                sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
-                               netdev_dbg(
-                                       dev->net,
-                                       "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-                                       header_start[0], header_start[1],
-                                       header_start[2], header_start[3],
-                                       header_start[4], header_start[5],
+                               netdev_dbg(dev->net,
+                                       "Received expected unknown frame header: %6phC. Package length: %i\n",
+                                       header_start,
                                        skb->len - KALMIA_HEADER_LENGTH);
                        }
                        else {
-                               netdev_err(
-                                       dev->net,
-                                       "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-                                       header_start[0], header_start[1],
-                                       header_start[2], header_start[3],
-                                       header_start[4], header_start[5],
+                               netdev_err(dev->net,
+                                       "Received unknown frame header: %6phC. Package length: %i\n",
+                                       header_start,
                                        skb->len - KALMIA_HEADER_LENGTH);
                                return 0;
                        }
                }
                else
-                       netdev_dbg(
-                               dev->net,
-                               "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-                               header_start[0], header_start[1], header_start[2],
-                               header_start[3], header_start[4], header_start[5],
-                               skb->len - KALMIA_HEADER_LENGTH);
+                       netdev_dbg(dev->net,
+                               "Received header: %6phC. Package length: %i\n",
+                               header_start, skb->len - KALMIA_HEADER_LENGTH);
 
                /* subtract start header and end header */
                usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
@@ -310,12 +298,9 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                                sizeof(HEADER_END_OF_USB_PACKET)) == 0);
                        if (!is_last) {
                                header_start = skb->data + ether_packet_length;
-                               netdev_dbg(
-                                       dev->net,
-                                       "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
-                                       header_start[0], header_start[1],
-                                       header_start[2], header_start[3],
-                                       header_start[4], header_start[5],
+                               netdev_dbg(dev->net,
+                                       "End header: %6phC. Package length: %i\n",
+                                       header_start,
                                        skb->len - KALMIA_HEADER_LENGTH);
                        }
                }
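The kalmia hunks replace six "%02x" arguments with the "%*ph" printk extension; the C suffix selects ':' separators, so "%6phC" prints a six-byte buffer as colon-separated hex. Minimal illustration:

	static void example_show_header(const u8 *hdr)
	{
		/* hdr is assumed to point at at least 6 bytes */
		pr_debug("header: %6phC\n", hdr);	/* e.g. de:ad:be:ef:00:01 */
	}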
index cf887c2384e95004547bf56ef7dc8ea0a67e66e5..d095d0d3056b82e05df3daddae6fe0d10bb10564 100644 (file)
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
+       {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -582,6 +583,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)},    /* Cinterion PLxx */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index 14e5198886311fca92b7bf1baf1facc6b5958221..d02bac82fc5759df00a8bfab5907e29f92be722c 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
@@ -1749,18 +1748,7 @@ static struct usb_driver rtl8152_driver = {
        .resume =       rtl8152_resume
 };
 
-static int __init usb_rtl8152_init(void)
-{
-       return usb_register(&rtl8152_driver);
-}
-
-static void __exit usb_rtl8152_exit(void)
-{
-       usb_deregister(&rtl8152_driver);
-}
-
-module_init(usb_rtl8152_init);
-module_exit(usb_rtl8152_exit);
+module_usb_driver(rtl8152_driver);
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
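module_usb_driver() replaces the hand-written init/exit pair removed above; it expands to roughly the following boilerplate (see include/linux/usb.h for the exact macro; function names here are illustrative):

	static int __init example_init(void)
	{
		return usb_register(&rtl8152_driver);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		usb_deregister(&rtl8152_driver);
	}
	module_exit(example_exit);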
index a491d3a95393e4c533918c572ab7ba9256fa48ec..6cbdac67f3a0d3f899b0a3ca783d427b2e1c9704 100644 (file)
@@ -130,19 +130,23 @@ struct rtl8150 {
        struct usb_device *udev;
        struct tasklet_struct tl;
        struct net_device *netdev;
-       struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb;
+       struct urb *rx_urb, *tx_urb, *intr_urb;
        struct sk_buff *tx_skb, *rx_skb;
        struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE];
        spinlock_t rx_pool_lock;
        struct usb_ctrlrequest dr;
        int intr_interval;
-       __le16 rx_creg;
        u8 *intr_buff;
        u8 phy;
 };
 
 typedef struct rtl8150 rtl8150_t;
 
+struct async_req {
+       struct usb_ctrlrequest dr;
+       u16 rx_creg;
+};
+
 static const char driver_name [] = "rtl8150";
 
 /*
@@ -164,51 +168,47 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
                               indx, 0, data, size, 500);
 }
 
-static void ctrl_callback(struct urb *urb)
+static void async_set_reg_cb(struct urb *urb)
 {
-       rtl8150_t *dev;
+       struct async_req *req = (struct async_req *)urb->context;
        int status = urb->status;
 
-       switch (status) {
-       case 0:
-               break;
-       case -EINPROGRESS:
-               break;
-       case -ENOENT:
-               break;
-       default:
-               if (printk_ratelimit())
-                       dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
-       }
-       dev = urb->context;
-       clear_bit(RX_REG_SET, &dev->flags);
+       if (status < 0)
+               dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status);
+       kfree(req);
+       usb_free_urb(urb);
 }
 
-static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size)
+static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg)
 {
-       int ret;
-
-       if (test_bit(RX_REG_SET, &dev->flags))
-               return -EAGAIN;
+       int res = -ENOMEM;
+       struct urb *async_urb;
+       struct async_req *req;
 
-       dev->dr.bRequestType = RTL8150_REQT_WRITE;
-       dev->dr.bRequest = RTL8150_REQ_SET_REGS;
-       dev->dr.wValue = cpu_to_le16(indx);
-       dev->dr.wIndex = 0;
-       dev->dr.wLength = cpu_to_le16(size);
-       dev->ctrl_urb->transfer_buffer_length = size;
-       usb_fill_control_urb(dev->ctrl_urb, dev->udev,
-                        usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr,
-                        &dev->rx_creg, size, ctrl_callback, dev);
-       if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) {
-               if (ret == -ENODEV)
+       req = kmalloc(sizeof(struct async_req), GFP_ATOMIC);
+       if (req == NULL)
+               return res;
+       async_urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (async_urb == NULL) {
+               kfree(req);
+               return res;
+       }
+       req->rx_creg = cpu_to_le16(reg);
+       req->dr.bRequestType = RTL8150_REQT_WRITE;
+       req->dr.bRequest = RTL8150_REQ_SET_REGS;
+       req->dr.wIndex = 0;
+       req->dr.wValue = cpu_to_le16(indx);
+       req->dr.wLength = cpu_to_le16(size);
+       usb_fill_control_urb(async_urb, dev->udev,
+                            usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr,
+                            &req->rx_creg, size, async_set_reg_cb, req);
+       res = usb_submit_urb(async_urb, GFP_ATOMIC);
+       if (res) {
+               if (res == -ENODEV)
                        netif_device_detach(dev->netdev);
-               dev_err(&dev->udev->dev,
-                       "control request submission failed: %d\n", ret);
-       } else
-               set_bit(RX_REG_SET, &dev->flags);
-
-       return ret;
+               dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res);
+       }
+       return res;
 }
 
 static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
@@ -330,13 +330,6 @@ static int alloc_all_urbs(rtl8150_t * dev)
                usb_free_urb(dev->tx_urb);
                return 0;
        }
-       dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (!dev->ctrl_urb) {
-               usb_free_urb(dev->rx_urb);
-               usb_free_urb(dev->tx_urb);
-               usb_free_urb(dev->intr_urb);
-               return 0;
-       }
 
        return 1;
 }
@@ -346,7 +339,6 @@ static void free_all_urbs(rtl8150_t * dev)
        usb_free_urb(dev->rx_urb);
        usb_free_urb(dev->tx_urb);
        usb_free_urb(dev->intr_urb);
-       usb_free_urb(dev->ctrl_urb);
 }
 
 static void unlink_all_urbs(rtl8150_t * dev)
@@ -354,7 +346,6 @@ static void unlink_all_urbs(rtl8150_t * dev)
        usb_kill_urb(dev->rx_urb);
        usb_kill_urb(dev->tx_urb);
        usb_kill_urb(dev->intr_urb);
-       usb_kill_urb(dev->ctrl_urb);
 }
 
 static inline struct sk_buff *pull_skb(rtl8150_t *dev)
@@ -629,7 +620,6 @@ static int enable_net_traffic(rtl8150_t * dev)
        }
        /* RCR bit7=1 attach Rx info at the end;  =0 HW CRC (which is broken) */
        rcr = 0x9e;
-       dev->rx_creg = cpu_to_le16(rcr);
        tcr = 0xd8;
        cr = 0x0c;
        if (!(rcr & 0x80))
@@ -662,20 +652,22 @@ static void rtl8150_tx_timeout(struct net_device *netdev)
 static void rtl8150_set_multicast(struct net_device *netdev)
 {
        rtl8150_t *dev = netdev_priv(netdev);
+       u16 rx_creg = 0x9e;
+
        netif_stop_queue(netdev);
        if (netdev->flags & IFF_PROMISC) {
-               dev->rx_creg |= cpu_to_le16(0x0001);
+               rx_creg |= 0x0001;
                dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
        } else if (!netdev_mc_empty(netdev) ||
                   (netdev->flags & IFF_ALLMULTI)) {
-               dev->rx_creg &= cpu_to_le16(0xfffe);
-               dev->rx_creg |= cpu_to_le16(0x0002);
+               rx_creg &= 0xfffe;
+               rx_creg |= 0x0002;
                dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
        } else {
                /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
-               dev->rx_creg &= cpu_to_le16(0x00fc);
+               rx_creg &= 0x00fc;
        }
-       async_set_registers(dev, RCR, 2);
+       async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
        netif_wake_queue(netdev);
 }
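In the rtl8150 rework above, each asynchronous register write now allocates its own request and URB, and both are released in the completion handler, so the submitter never blocks and no per-device control-URB state is needed. The ownership rule in miniature (names illustrative):

	static void example_one_shot_cb(struct urb *urb)
	{
		kfree(urb->context);	/* the request allocated for this write */
		usb_free_urb(urb);	/* and the one-shot URB itself */
	}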
 
index f95cb032394bb03f05b7ae2605b7b9585e8e3c67..06ee82f557d45ba31b4847c187f57771ae2c73d2 100644 (file)
@@ -1477,7 +1477,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 
        /* usbnet already took usb runtime pm, so have to enable the feature
         * for usb interface, otherwise usb_autopm_get_interface may return
-        * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+        * failure if RUNTIME_PM is enabled.
         */
        if (!driver->supports_autosuspend) {
                driver->supports_autosuspend = 1;
index 177f911f59462f6770df07ff11b49b12f998dfce..da866523cf20097d57550c7357020e937782fd37 100644 (file)
@@ -379,12 +379,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        else
                snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
 
-       if (strchr(dev->name, '%')) {
-               err = dev_alloc_name(dev, dev->name);
-               if (err < 0)
-                       goto err_alloc_name;
-       }
-
        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;
@@ -404,7 +398,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 
 err_register_dev:
        /* nothing to do */
-err_alloc_name:
 err_configure_peer:
        unregister_netdevice(peer);
        return err;
index 3c23fdc27bf0798085361a64d6262cdaed34c66b..c9e00387d9996e3c5b0660a57bf296e4ff15f3d5 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 
-static int napi_weight = 128;
+static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
 static bool csum = true, gso = true;
@@ -636,10 +636,11 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
-       for (i = 0; i < vi->curr_queue_pairs; i++) {
-               /* Make sure we have some buffers: if oom use wq. */
-               if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-                       schedule_delayed_work(&vi->refill, 0);
+       for (i = 0; i < vi->max_queue_pairs; i++) {
+               if (i < vi->curr_queue_pairs)
+                       /* Make sure we have some buffers: if oom use wq. */
+                       if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                               schedule_delayed_work(&vi->refill, 0);
                virtnet_napi_enable(&vi->rq[i]);
        }
 
index ba81f3c39a837af31af507dd1b2c7f4a599fe479..284c6c00c3539c1ddb584b8eb99cbd3f46ccdd6b 100644 (file)
@@ -44,6 +44,8 @@
 
 #define VXLAN_VERSION  "0.1"
 
+#define PORT_HASH_BITS 8
+#define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
 #define FDB_HASH_BITS  8
@@ -76,15 +78,25 @@ static bool log_ecn_error = true;
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
-/* per-net private data for this module */
 static unsigned int vxlan_net_id;
-struct vxlan_net {
-       struct socket     *sock;        /* UDP encap socket */
+
+/* per UDP socket information */
+struct vxlan_sock {
+       struct hlist_node hlist;
+       struct rcu_head   rcu;
+       struct work_struct del_work;
+       unsigned int      refcnt;
+       struct socket     *sock;
        struct hlist_head vni_list[VNI_HASH_SIZE];
 };
 
+/* per-network namespace private data for this module */
+struct vxlan_net {
+       struct list_head  vxlan_list;
+       struct hlist_head sock_list[PORT_HASH_SIZE];
+};
+
 struct vxlan_rdst {
-       struct rcu_head          rcu;
        __be32                   remote_ip;
        __be16                   remote_port;
        u32                      remote_vni;
@@ -106,7 +118,9 @@ struct vxlan_fdb {
 
 /* Pseudo network device */
 struct vxlan_dev {
-       struct hlist_node hlist;
+       struct hlist_node hlist;        /* vni hash table */
+       struct list_head  next;         /* vxlan's per namespace list */
+       struct vxlan_sock *vn_sock;     /* listening socket */
        struct net_device *dev;
        struct vxlan_rdst default_dst;  /* default destination */
        __be32            saddr;        /* source address */
@@ -135,19 +149,43 @@ struct vxlan_dev {
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 
-static inline struct hlist_head *vni_head(struct net *net, u32 id)
+/* Virtual Network hash table head */
+static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
+{
+       return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
+}
+
+/* Socket hash table head */
+static inline struct hlist_head *vs_head(struct net *net, __be16 port)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 
-       return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
+       return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+}
+
+/* Find VXLAN socket based on network namespace and UDP port */
+static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
+{
+       struct vxlan_sock *vs;
+
+       hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
+               if (inet_sk(vs->sock->sk)->inet_sport == port)
+                       return vs;
+       }
+       return NULL;
 }
 
 /* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
 {
+       struct vxlan_sock *vs;
        struct vxlan_dev *vxlan;
 
-       hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
+       vs = vxlan_find_port(net, port);
+       if (!vs)
+               return NULL;
+
+       hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
                if (vxlan->default_dst.remote_vni == id)
                        return vxlan;
        }
@@ -301,7 +339,7 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
 }
 
 /* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
                                        const u8 *mac)
 
 {
@@ -316,6 +354,18 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
        return NULL;
 }
 
+static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+                                       const u8 *mac)
+{
+       struct vxlan_fdb *f;
+
+       f = __vxlan_find_mac(vxlan, mac);
+       if (f)
+               f->used = jiffies;
+
+       return f;
+}
+
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
                            __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
@@ -353,7 +403,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
        struct vxlan_fdb *f;
        int notify = 0;
 
-       f = vxlan_find_mac(vxlan, mac);
+       f = __vxlan_find_mac(vxlan, mac);
        if (f) {
                if (flags & NLM_F_EXCL) {
                        netdev_dbg(vxlan->dev,
@@ -553,19 +603,22 @@ skip:
 
 /* Watch incoming packets to learn mapping between Ethernet address
  * and Tunnel endpoint.
+ * Return true if packet is bogus and should be dropped.
  */
-static void vxlan_snoop(struct net_device *dev,
+static bool vxlan_snoop(struct net_device *dev,
                        __be32 src_ip, const u8 *src_mac)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
-       int err;
 
        f = vxlan_find_mac(vxlan, src_mac);
        if (likely(f)) {
-               f->used = jiffies;
                if (likely(f->remote.remote_ip == src_ip))
-                       return;
+                       return false;
+
+               /* Don't migrate static entries, drop packets */
+               if (f->state & NUD_NOARP)
+                       return true;
 
                if (net_ratelimit())
                        netdev_info(dev,
@@ -577,14 +630,19 @@ static void vxlan_snoop(struct net_device *dev,
        } else {
                /* learned new entry */
                spin_lock(&vxlan->hash_lock);
-               err = vxlan_fdb_create(vxlan, src_mac, src_ip,
-                                      NUD_REACHABLE,
-                                      NLM_F_EXCL|NLM_F_CREATE,
-                                      vxlan->dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      0, NTF_SELF);
+
+               /* close off race between vxlan_flush and incoming packets */
+               if (netif_running(dev))
+                       vxlan_fdb_create(vxlan, src_mac, src_ip,
+                                        NUD_REACHABLE,
+                                        NLM_F_EXCL|NLM_F_CREATE,
+                                        vxlan->dst_port,
+                                        vxlan->default_dst.remote_vni,
+                                        0, NTF_SELF);
                spin_unlock(&vxlan->hash_lock);
        }
+
+       return false;
 }
 
 
@@ -592,20 +650,18 @@ static void vxlan_snoop(struct net_device *dev,
 static bool vxlan_group_used(struct vxlan_net *vn,
                             const struct vxlan_dev *this)
 {
-       const struct vxlan_dev *vxlan;
-       unsigned h;
+       struct vxlan_dev *vxlan;
 
-       for (h = 0; h < VNI_HASH_SIZE; ++h)
-               hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
-                       if (vxlan == this)
-                               continue;
+       list_for_each_entry(vxlan, &vn->vxlan_list, next) {
+               if (vxlan == this)
+                       continue;
 
-                       if (!netif_running(vxlan->dev))
-                               continue;
+               if (!netif_running(vxlan->dev))
+                       continue;
 
-                       if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip)
-                               return true;
-               }
+               if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip)
+                       return true;
+       }
 
        return false;
 }
@@ -615,7 +671,7 @@ static int vxlan_join_group(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
-       struct sock *sk = vn->sock->sk;
+       struct sock *sk = vxlan->vn_sock->sock->sk;
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr   = vxlan->default_dst.remote_ip,
                .imr_ifindex            = vxlan->default_dst.remote_ifindex,
@@ -643,7 +699,7 @@ static int vxlan_leave_group(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        int err = 0;
-       struct sock *sk = vn->sock->sk;
+       struct sock *sk = vxlan->vn_sock->sock->sk;
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr   = vxlan->default_dst.remote_ip,
                .imr_ifindex            = vxlan->default_dst.remote_ifindex,
@@ -670,6 +726,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        struct vxlanhdr *vxh;
        struct vxlan_dev *vxlan;
        struct pcpu_tstats *stats;
+       __be16 port;
        __u32 vni;
        int err;
 
@@ -693,9 +750,11 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        /* Is this VNI defined? */
        vni = ntohl(vxh->vx_vni) >> 8;
-       vxlan = vxlan_find_vni(sock_net(sk), vni);
+       port = inet_sk(sk)->inet_sport;
+       vxlan = vxlan_find_vni(sock_net(sk), vni, port);
        if (!vxlan) {
-               netdev_dbg(skb->dev, "unknown vni %d\n", vni);
+               netdev_dbg(skb->dev, "unknown vni %d port %u\n",
+                          vni, ntohs(port));
                goto drop;
        }
 
@@ -716,8 +775,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                               vxlan->dev->dev_addr) == 0)
                goto drop;
 
-       if (vxlan->flags & VXLAN_F_LEARN)
-               vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+       if ((vxlan->flags & VXLAN_F_LEARN) &&
+           vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+               goto drop;
 
        __skb_tunnel_rx(skb, vxlan->dev);
        skb_reset_network_header(skb);
@@ -875,7 +935,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
        return false;
 }
 
-static void vxlan_sock_free(struct sk_buff *skb)
+static void vxlan_sock_put(struct sk_buff *skb)
 {
        sock_put(skb->sk);
 }
@@ -883,13 +943,13 @@ static void vxlan_sock_free(struct sk_buff *skb)
 /* On transmit, associate with the tunnel socket */
 static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
 {
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
-       struct sock *sk = vn->sock->sk;
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct sock *sk = vxlan->vn_sock->sock->sk;
 
        skb_orphan(skb);
        sock_hold(sk);
        skb->sk = sk;
-       skb->destructor = vxlan_sock_free;
+       skb->destructor = vxlan_sock_put;
 }
 
 /* Compute source port for outgoing packet
@@ -961,7 +1021,6 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct rtable *rt;
        const struct iphdr *old_iph;
-       struct iphdr *iph;
        struct vxlanhdr *vxh;
        struct udphdr *uh;
        struct flowi4 fl4;
@@ -970,6 +1029,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
         u32 vni;
        __be16 df = 0;
        __u8 tos, ttl;
+       int err;
 
        dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
        vni = rdst->remote_vni;
@@ -1031,19 +1091,12 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                struct vxlan_dev *dst_vxlan;
 
                ip_rt_put(rt);
-               dst_vxlan = vxlan_find_vni(dev_net(dev), vni);
+               dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
                if (!dst_vxlan)
                        goto tx_error;
                vxlan_encap_bypass(skb, vxlan, dst_vxlan);
                return NETDEV_TX_OK;
        }
-
-       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-       IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
-                             IPSKB_REROUTED);
-       skb_dst_drop(skb);
-       skb_dst_set(skb, &rt->dst);
-
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_FLAGS);
        vxh->vx_vni = htonl(vni << 8);
@@ -1058,27 +1111,18 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        uh->len = htons(skb->len);
        uh->check = 0;
 
-       __skb_push(skb, sizeof(*iph));
-       skb_reset_network_header(skb);
-       iph             = ip_hdr(skb);
-       iph->version    = 4;
-       iph->ihl        = sizeof(struct iphdr) >> 2;
-       iph->frag_off   = df;
-       iph->protocol   = IPPROTO_UDP;
-       iph->tos        = ip_tunnel_ecn_encap(tos, old_iph, skb);
-       iph->daddr      = dst;
-       iph->saddr      = fl4.saddr;
-       iph->ttl        = ttl ? : ip4_dst_hoplimit(&rt->dst);
-       tunnel_ip_select_ident(skb, old_iph, &rt->dst);
-
-       nf_reset(skb);
-
        vxlan_set_owner(dev, skb);
 
        if (handle_offloads(skb))
                goto drop;
 
-       iptunnel_xmit(skb, dev);
+       tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+       ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+       err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
+                           IPPROTO_UDP, tos, ttl, df);
+       iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+
        return NETDEV_TX_OK;
 
 drop:
@@ -1140,9 +1184,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
                struct sk_buff *skb1;
 
                skb1 = skb_clone(skb, GFP_ATOMIC);
-               rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
-               if (rc == NETDEV_TX_OK)
-                       rc = rc1;
+               if (skb1) {
+                       rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+                       if (rc == NETDEV_TX_OK)
+                               rc = rc1;
+               }
        }
 
        rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
@@ -1219,7 +1265,7 @@ static int vxlan_open(struct net_device *dev)
 /* Purge the forwarding table */
 static void vxlan_flush(struct vxlan_dev *vxlan)
 {
-       unsigned h;
+       unsigned int h;
 
        spin_lock_bh(&vxlan->hash_lock);
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
@@ -1283,7 +1329,7 @@ static void vxlan_free(struct net_device *dev)
 static void vxlan_setup(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       unsigned h;
+       unsigned int h;
        int low, high;
 
        eth_hw_addr_random(dev);
@@ -1306,6 +1352,7 @@ static void vxlan_setup(struct net_device *dev)
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
+       INIT_LIST_HEAD(&vxlan->next);
        spin_lock_init(&vxlan->hash_lock);
 
        init_timer_deferrable(&vxlan->age_timer);
@@ -1390,11 +1437,78 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
 };
 
+static void vxlan_del_work(struct work_struct *work)
+{
+       struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
+
+       sk_release_kernel(vs->sock->sk);
+       kfree_rcu(vs, rcu);
+}
+
+/* Create new listen socket if needed */
+static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
+{
+       struct vxlan_sock *vs;
+       struct sock *sk;
+       struct sockaddr_in vxlan_addr = {
+               .sin_family = AF_INET,
+               .sin_addr.s_addr = htonl(INADDR_ANY),
+       };
+       int rc;
+       unsigned int h;
+
+       vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+       if (!vs)
+               return ERR_PTR(-ENOMEM);
+
+       for (h = 0; h < VNI_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&vs->vni_list[h]);
+
+       INIT_WORK(&vs->del_work, vxlan_del_work);
+
+       /* Create UDP socket for encapsulation receive. */
+       rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
+       if (rc < 0) {
+               pr_debug("UDP socket create failed\n");
+               kfree(vs);
+               return ERR_PTR(rc);
+       }
+
+       /* Put in proper namespace */
+       sk = vs->sock->sk;
+       sk_change_net(sk, net);
+
+       vxlan_addr.sin_port = port;
+
+       rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
+                        sizeof(vxlan_addr));
+       if (rc < 0) {
+               pr_debug("bind for UDP socket %pI4:%u (%d)\n",
+                        &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
+               sk_release_kernel(sk);
+               kfree(vs);
+               return ERR_PTR(rc);
+       }
+
+       /* Disable multicast loopback */
+       inet_sk(sk)->mc_loop = 0;
+
+       /* Mark socket as an encapsulation socket. */
+       udp_sk(sk)->encap_type = 1;
+       udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
+       udp_encap_enable();
+
+       vs->refcnt = 1;
+       return vs;
+}
+
 static int vxlan_newlink(struct net *net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
+       struct vxlan_sock *vs;
        __u32 vni;
        int err;
 
@@ -1402,10 +1516,6 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
                return -EINVAL;
 
        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
-       if (vxlan_find_vni(net, vni)) {
-               pr_info("duplicate VNI %u\n", vni);
-               return -EEXIST;
-       }
        dst->remote_vni = vni;
 
        if (data[IFLA_VXLAN_GROUP])
@@ -1471,22 +1581,58 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (data[IFLA_VXLAN_PORT])
                vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
 
+       if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+               pr_info("duplicate VNI %u\n", vni);
+               return -EEXIST;
+       }
+
+       vs = vxlan_find_port(net, vxlan->dst_port);
+       if (vs)
+               ++vs->refcnt;
+       else {
+               /* Drop lock because socket create acquires RTNL lock */
+               rtnl_unlock();
+               vs = vxlan_socket_create(net, vxlan->dst_port);
+               rtnl_lock();
+               if (IS_ERR(vs))
+                       return PTR_ERR(vs);
+
+               hlist_add_head_rcu(&vs->hlist, vs_head(net, vxlan->dst_port));
+       }
+       vxlan->vn_sock = vs;
+
        SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
 
        err = register_netdevice(dev);
-       if (!err)
-               hlist_add_head_rcu(&vxlan->hlist, vni_head(net, dst->remote_vni));
+       if (err) {
+               if (--vs->refcnt == 0) {
+                       rtnl_unlock();
+                       sk_release_kernel(vs->sock->sk);
+                       kfree(vs);
+                       rtnl_lock();
+               }
+               return err;
+       }
 
-       return err;
+       list_add(&vxlan->next, &vn->vxlan_list);
+       hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+
+       return 0;
 }
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_sock *vs = vxlan->vn_sock;
 
        hlist_del_rcu(&vxlan->hlist);
-
+       list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
+
+       if (--vs->refcnt == 0) {
+               hlist_del_rcu(&vs->hlist);
+               schedule_work(&vs->del_work);
+       }
 }
 
 static size_t vxlan_get_size(const struct net_device *dev)
@@ -1572,46 +1718,12 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
 static __net_init int vxlan_init_net(struct net *net)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-       struct sock *sk;
-       struct sockaddr_in vxlan_addr = {
-               .sin_family = AF_INET,
-               .sin_addr.s_addr = htonl(INADDR_ANY),
-       };
-       int rc;
-       unsigned h;
-
-       /* Create UDP socket for encapsulation receive. */
-       rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
-       if (rc < 0) {
-               pr_debug("UDP socket create failed\n");
-               return rc;
-       }
-       /* Put in proper namespace */
-       sk = vn->sock->sk;
-       sk_change_net(sk, net);
-
-       vxlan_addr.sin_port = htons(vxlan_port);
-
-       rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
-                        sizeof(vxlan_addr));
-       if (rc < 0) {
-               pr_debug("bind for UDP socket %pI4:%u (%d)\n",
-                        &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
-               sk_release_kernel(sk);
-               vn->sock = NULL;
-               return rc;
-       }
-
-       /* Disable multicast loopback */
-       inet_sk(sk)->mc_loop = 0;
+       unsigned int h;
 
-       /* Mark socket as an encapsulation socket. */
-       udp_sk(sk)->encap_type = 1;
-       udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
-       udp_encap_enable();
+       INIT_LIST_HEAD(&vn->vxlan_list);
 
-       for (h = 0; h < VNI_HASH_SIZE; ++h)
-               INIT_HLIST_HEAD(&vn->vni_list[h]);
+       for (h = 0; h < PORT_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&vn->sock_list[h]);
 
        return 0;
 }
@@ -1620,18 +1732,11 @@ static __net_exit void vxlan_exit_net(struct net *net)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan;
-       unsigned h;
 
        rtnl_lock();
-       for (h = 0; h < VNI_HASH_SIZE; ++h)
-               hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
-                       dev_close(vxlan->dev);
+       list_for_each_entry(vxlan, &vn->vxlan_list, next)
+               dev_close(vxlan->dev);
        rtnl_unlock();
-
-       if (vn->sock) {
-               sk_release_kernel(vn->sock->sk);
-               vn->sock = NULL;
-       }
 }
 
 static struct pernet_operations vxlan_net_ops = {
@@ -1662,7 +1767,7 @@ out2:
 out1:
        return rc;
 }
-module_init(vxlan_init_module);
+late_initcall(vxlan_init_module);
 
 static void __exit vxlan_cleanup_module(void)
 {
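The vxlan rework above moves from one UDP socket per namespace to one shared, refcounted socket per destination UDP port, created on demand in newlink and torn down through a workqueue once the last device using it is gone. A condensed sketch of that sharing, with the field and helper names from the diff but locking (RTNL) and error handling elided:

	static struct vxlan_sock *example_vs_get(struct net *net, __be16 port)
	{
		struct vxlan_sock *vs = vxlan_find_port(net, port);

		if (vs) {
			vs->refcnt++;		/* reuse the existing socket */
			return vs;
		}
		vs = vxlan_socket_create(net, port);
		if (!IS_ERR(vs))
			hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
		return vs;
	}

	static void example_vs_put(struct vxlan_sock *vs)
	{
		if (--vs->refcnt == 0) {
			hlist_del_rcu(&vs->hlist);
			schedule_work(&vs->del_work);	/* sk_release_kernel + kfree_rcu */
		}
	}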
index 147614ed86aa8ae5d4a8a5451304337a70bb0960..70ac59929f800af871f422a4a18b91102e4e78df 100644 (file)
@@ -477,7 +477,7 @@ static void dlci_setup(struct net_device *dev)
 static int dlci_dev_event(struct notifier_block *unused,
                          unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *) ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (dev_net(dev) != &init_net)
                return NOTIFY_DONE;
index a0a932c63d0a29e367daad9febe6ccf7f6370f3d..9c33ca918e19f7c03bc66ee8aff4b5365ec031eb 100644 (file)
@@ -99,7 +99,7 @@ static inline void hdlc_proto_stop(struct net_device *dev)
 static int hdlc_device_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        hdlc_device *hdlc;
        unsigned long flags;
        int on;
index fc9d11d74d605de87107d9b8642732dd1944f7da..e7bbdb7af53ac7dcbdd21d12705613054efe8626 100644 (file)
@@ -1384,7 +1384,6 @@ static int hss_remove_one(struct platform_device *pdev)
        unregister_hdlc_device(port->netdev);
        free_netdev(port->netdev);
        npe_release(port->npe);
-       platform_set_drvdata(pdev, NULL);
        kfree(port);
        return 0;
 }
index a73b49eb87e37a150c6096299a08460ad921d32d..a33a46fa88dd0216871502622a0f06b976ac94d5 100644 (file)
@@ -370,7 +370,7 @@ static int lapbeth_device_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
        struct lapbethdev *lapbeth;
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (dev_net(dev) != &init_net)
                return NOTIFY_DONE;
index 8e8bcc7a4805914ba64888ea683c13b3b76ddb0a..e9bc9e616b69c94e4c6758ae2c2327768778b942 100644 (file)
@@ -185,7 +185,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
 
  err_free_hw:
        ieee80211_free_hw(hw);
-       platform_set_drvdata(pdev, NULL);
  err_iounmap:
         iounmap(mem);
  err_out:
@@ -221,7 +220,6 @@ static int ath_ahb_remove(struct platform_device *pdev)
 
        ath5k_deinit_ah(ah);
        iounmap(ah->iobase);
-       platform_set_drvdata(pdev, NULL);
        ieee80211_free_hw(hw);
 
        return 0;
index d1ff3c246a1232cd1be306fbddce97df9a3d0d28..072e4b53106765ce8e0d6c833652501a7abde62d 100644 (file)
@@ -150,7 +150,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
        free_irq(irq, sc);
  err_free_hw:
        ieee80211_free_hw(hw);
-       platform_set_drvdata(pdev, NULL);
        return ret;
 }
 
@@ -164,7 +163,6 @@ static int ath_ahb_remove(struct platform_device *pdev)
                ath9k_deinit_device(sc);
                free_irq(sc->irq, sc);
                ieee80211_free_hw(sc->hw);
-               platform_set_drvdata(pdev, NULL);
        }
 
        return 0;
index 2721f52f0177ba7a4402521328a6029c80ed57c4..87454f6c7b4f0b1af790d8cab084e108f44f1d8c 100644 (file)
@@ -69,7 +69,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &mask))
+       if (kstrtoul(buf, 0, &mask))
                return -EINVAL;
 
        common->debug_mask = mask;
@@ -114,7 +114,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &mask))
+       if (kstrtoul(buf, 0, &mask))
                return -EINVAL;
 
        ah->txchainmask = mask;
@@ -157,7 +157,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &mask))
+       if (kstrtoul(buf, 0, &mask))
                return -EINVAL;
 
        ah->rxchainmask = mask;
@@ -244,7 +244,7 @@ static ssize_t write_file_ani(struct file *file,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &ani))
+       if (kstrtoul(buf, 0, &ani))
                return -EINVAL;
 
        if (ani < 0 || ani > 1)
@@ -300,7 +300,7 @@ static ssize_t write_file_ant_diversity(struct file *file,
                goto exit;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &antenna_diversity))
+       if (kstrtoul(buf, 0, &antenna_diversity))
                return -EINVAL;
 
        common->antenna_diversity = !!antenna_diversity;
@@ -1270,7 +1270,7 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &regidx))
+       if (kstrtoul(buf, 0, &regidx))
                return -EINVAL;
 
        sc->debug.regidx = regidx;
@@ -1315,7 +1315,7 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &regval))
+       if (kstrtoul(buf, 0, &regval))
                return -EINVAL;
 
        ath9k_ps_wakeup(sc);
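This file and the wireless hunks that follow all make the same mechanical substitution: strict_strtoul() becomes kstrtoul(), which takes the same arguments and returns 0 on success or a negative errno. Typical debugfs write-handler usage:

	static int example_parse_mask(const char *buf, unsigned long *mask)
	{
		/* base 0 accepts "0x..." hex or plain decimal input */
		return kstrtoul(buf, 0, mask);
	}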
index b7611b7bbe437e289bd5f452c5cdf6d125a6c702..3c6e4138a95d13405ed8b2cff14ffbd5a2f52bb5 100644 (file)
@@ -96,7 +96,7 @@ static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        if (val == DFS_STATS_RESET_MAGIC)
index 7416d58a122c2ad9b22ebcdc7fbbc64c74b2ad02..c1b45e2f848124bdeb0d28a156ed570f4928e635 100644 (file)
@@ -471,7 +471,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &mask))
+       if (kstrtoul(buf, 0, &mask))
                return -EINVAL;
 
        common->debug_mask = mask;
index 866ce6c684055e3e6a06f79681c75f73f9ccae27..9581d07a4242bbdabef6174e60560d42aa4e6b09 100644 (file)
@@ -3119,7 +3119,7 @@ il3945_store_debug_level(struct device *d, struct device_attribute *attr,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret)
                IL_INFO("%s is not in hex or decimal form.\n", buf);
        else
index d287fd2dce4250f636040a2e933e9c6ca5c9b394..b9b2bb51e60590ab7dfb91d4e284a6a0c6c259a7 100644 (file)
@@ -4585,7 +4585,7 @@ il4965_store_debug_level(struct device *d, struct device_attribute *attr,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret)
                IL_ERR("%s is not in hex or decimal form.\n", buf);
        else
@@ -4632,7 +4632,7 @@ il4965_store_tx_power(struct device *d, struct device_attribute *attr,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret)
                IL_INFO("%s is not in decimal form.\n", buf);
        else {
index 3e81264db81e937d25b3dda0a5fb7543d4c4379e..efae07e05c807cf12fe9081723377eb9bc895e81 100644 (file)
@@ -240,7 +240,7 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
        memset(&mesh_access, 0, sizeof(mesh_access));
        mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
 
-       if (!strict_strtoul(buf, 10, &retry_limit))
+       if (!kstrtoul(buf, 10, &retry_limit))
                return -ENOTSUPP;
        if (retry_limit > 15)
                return -ENOTSUPP;
index af59dd5718e1b90eab623b8b04bf94b0332fce45..8053f775d392d68479fb996dd4ac5582a99691b4 100644 (file)
@@ -1817,7 +1817,7 @@ static ssize_t rtl_store_debug_level(struct device *d,
        unsigned long val;
        int ret;
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
        if (ret) {
                printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
        } else {
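
All of the hunks above are the same mechanical conversion: the deprecated strict_strtoul() is replaced by kstrtoul(), which likewise returns 0 on success and a negative errno on failure, so the surrounding error handling is unchanged. A minimal sketch of the debugfs write-handler pattern these drivers use follows; my_debug_state and my_write_mask are hypothetical names, not taken from any of the drivers in this diff.

/* Hypothetical driver state, for illustration only. */
struct my_debug_state {
        unsigned long mask;
};

static ssize_t my_write_mask(struct file *file, const char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct my_debug_state *st = file->private_data;
        unsigned long mask;
        char buf[32];
        size_t len = min(count, sizeof(buf) - 1);

        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;
        buf[len] = '\0';

        /* kstrtoul() returns 0 on success, -EINVAL or -ERANGE otherwise,
         * so "if (kstrtoul(...))" is a drop-in replacement for the old
         * strict_strtoul() check.
         */
        if (kstrtoul(buf, 0, &mask))
                return -EINVAL;

        st->mask = mask;
        return count;
}
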
index 9d7f1723dd8f750126337d7792a0614fea8ca601..8a4d77ee9c5b61a312c3f2142ffa42fc0671e122 100644 (file)
@@ -57,8 +57,12 @@ struct xenvif {
 
        u8               fe_dev_addr[6];
 
-       /* Physical parameters of the comms window. */
-       unsigned int     irq;
+       /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+       unsigned int tx_irq;
+       unsigned int rx_irq;
+       /* Only used when feature-split-event-channels = 1 */
+       char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+       char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 
        /* List of frontends to notify after a batch of frames sent. */
        struct list_head notify_list;
@@ -113,13 +117,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
                            unsigned int handle);
 
 int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
-                  unsigned long rx_ring_ref, unsigned int evtchn);
+                  unsigned long rx_ring_ref, unsigned int tx_evtchn,
+                  unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
 
 void xenvif_get(struct xenvif *vif);
 void xenvif_put(struct xenvif *vif);
 
 int xenvif_xenbus_init(void);
+void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
@@ -157,4 +163,6 @@ void xenvif_carrier_off(struct xenvif *vif);
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
+extern bool separate_tx_rx_irq;
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
index d984141684857c1708085af50ad8fed208a06f85..087d2db0389d04efc3368a0ba48a925a3b070124 100644 (file)
@@ -60,21 +60,39 @@ static int xenvif_rx_schedulable(struct xenvif *vif)
        return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
        struct xenvif *vif = dev_id;
 
        if (vif->netbk == NULL)
-               return IRQ_NONE;
+               return IRQ_HANDLED;
 
        xen_netbk_schedule_xenvif(vif);
 
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
+{
+       struct xenvif *vif = dev_id;
+
+       if (vif->netbk == NULL)
+               return IRQ_HANDLED;
+
        if (xenvif_rx_schedulable(vif))
                netif_wake_queue(vif->dev);
 
        return IRQ_HANDLED;
 }
 
+static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+{
+       xenvif_tx_interrupt(irq, dev_id);
+       xenvif_rx_interrupt(irq, dev_id);
+
+       return IRQ_HANDLED;
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
@@ -125,13 +143,17 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 static void xenvif_up(struct xenvif *vif)
 {
        xen_netbk_add_xenvif(vif);
-       enable_irq(vif->irq);
+       enable_irq(vif->tx_irq);
+       if (vif->tx_irq != vif->rx_irq)
+               enable_irq(vif->rx_irq);
        xen_netbk_check_rx_xenvif(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
 {
-       disable_irq(vif->irq);
+       disable_irq(vif->tx_irq);
+       if (vif->tx_irq != vif->rx_irq)
+               disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
        xen_netbk_deschedule_xenvif(vif);
        xen_netbk_remove_xenvif(vif);
@@ -308,25 +330,52 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 }
 
 int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
-                  unsigned long rx_ring_ref, unsigned int evtchn)
+                  unsigned long rx_ring_ref, unsigned int tx_evtchn,
+                  unsigned int rx_evtchn)
 {
        int err = -ENOMEM;
 
        /* Already connected through? */
-       if (vif->irq)
+       if (vif->tx_irq)
                return 0;
 
+       __module_get(THIS_MODULE);
+
        err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;
 
-       err = bind_interdomain_evtchn_to_irqhandler(
-               vif->domid, evtchn, xenvif_interrupt, 0,
-               vif->dev->name, vif);
-       if (err < 0)
-               goto err_unmap;
-       vif->irq = err;
-       disable_irq(vif->irq);
+       if (tx_evtchn == rx_evtchn) {
+               /* feature-split-event-channels == 0 */
+               err = bind_interdomain_evtchn_to_irqhandler(
+                       vif->domid, tx_evtchn, xenvif_interrupt, 0,
+                       vif->dev->name, vif);
+               if (err < 0)
+                       goto err_unmap;
+               vif->tx_irq = vif->rx_irq = err;
+               disable_irq(vif->tx_irq);
+       } else {
+               /* feature-split-event-channels == 1 */
+               snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
+                        "%s-tx", vif->dev->name);
+               err = bind_interdomain_evtchn_to_irqhandler(
+                       vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+                       vif->tx_irq_name, vif);
+               if (err < 0)
+                       goto err_unmap;
+               vif->tx_irq = err;
+               disable_irq(vif->tx_irq);
+
+               snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
+                        "%s-rx", vif->dev->name);
+               err = bind_interdomain_evtchn_to_irqhandler(
+                       vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+                       vif->rx_irq_name, vif);
+               if (err < 0)
+                       goto err_tx_unbind;
+               vif->rx_irq = err;
+               disable_irq(vif->rx_irq);
+       }
 
        xenvif_get(vif);
 
@@ -340,9 +389,13 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
        rtnl_unlock();
 
        return 0;
+err_tx_unbind:
+       unbind_from_irqhandler(vif->tx_irq, vif);
+       vif->tx_irq = 0;
 err_unmap:
        xen_netbk_unmap_frontend_rings(vif);
 err:
+       module_put(THIS_MODULE);
        return err;
 }
 
@@ -360,18 +413,37 @@ void xenvif_carrier_off(struct xenvif *vif)
 
 void xenvif_disconnect(struct xenvif *vif)
 {
+       /* Disconnect function might get called by the generic framework
+        * even before vif connects, so we need to check if we really
+        * need to do a module_put.
+        */
+       int need_module_put = 0;
+
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);
 
        atomic_dec(&vif->refcnt);
        wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
 
-       if (vif->irq)
-               unbind_from_irqhandler(vif->irq, vif);
+       if (vif->tx_irq) {
+               if (vif->tx_irq == vif->rx_irq)
+                       unbind_from_irqhandler(vif->tx_irq, vif);
+               else {
+                       unbind_from_irqhandler(vif->tx_irq, vif);
+                       unbind_from_irqhandler(vif->rx_irq, vif);
+               }
+               /* vif->tx_irq is valid, so we had a module_get in
+                * xenvif_connect.
+                */
+               need_module_put = 1;
+       }
 
        unregister_netdev(vif->dev);
 
        xen_netbk_unmap_frontend_rings(vif);
 
        free_netdev(vif->dev);
+
+       if (need_module_put)
+               module_put(THIS_MODULE);
 }
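
The __module_get()/module_put() pair added to xenvif_connect()/xenvif_disconnect() pins the module for as long as a vif is connected, which is what makes the module_exit() path added to netback.c below safe to introduce. A rough sketch of that idiom, with made-up names (my_vif, do_setup, do_teardown):

static int my_connect(struct my_vif *vif)
{
        int err;

        __module_get(THIS_MODULE);      /* pin the module while connected */

        err = do_setup(vif);            /* illustrative setup step */
        if (err) {
                module_put(THIS_MODULE);/* drop the reference on failure */
                return err;
        }
        vif->connected = true;
        return 0;
}

static void my_disconnect(struct my_vif *vif)
{
        if (vif->connected) {           /* only put if connect() succeeded */
                do_teardown(vif);
                module_put(THIS_MODULE);
        }
}
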
index 37984e6d4e99f9d81dddd760e01ac416010bdea8..130bcb217d2cc590c040dd2f5a6590ae2e76d0c0 100644 (file)
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
+/* Provide an option to disable split event channels at load time as
+ * event channels are a limited resource. Split event channels are
+ * enabled by default.
+ */
+bool separate_tx_rx_irq = true;
+module_param(separate_tx_rx_irq, bool, 0644);
+
 /*
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
@@ -662,7 +669,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 {
        struct xenvif *vif = NULL, *tmp;
        s8 status;
-       u16 irq, flags;
+       u16 flags;
        struct xen_netif_rx_response *resp;
        struct sk_buff_head rxq;
        struct sk_buff *skb;
@@ -771,20 +778,21 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                                         sco->meta_slots_used);
 
                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
-               irq = vif->irq;
-               if (ret && list_empty(&vif->notify_list))
-                       list_add_tail(&vif->notify_list, &notify);
 
                xenvif_notify_tx_completion(vif);
 
-               xenvif_put(vif);
+               if (ret && list_empty(&vif->notify_list))
+                       list_add_tail(&vif->notify_list, &notify);
+               else
+                       xenvif_put(vif);
                npo.meta_cons += sco->meta_slots_used;
                dev_kfree_skb(skb);
        }
 
        list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
-               notify_remote_via_irq(vif->irq);
+               notify_remote_via_irq(vif->rx_irq);
                list_del_init(&vif->notify_list);
+               xenvif_put(vif);
        }
 
        /* More work to do? */
@@ -1762,7 +1770,7 @@ static void make_tx_response(struct xenvif *vif,
        vif->tx.rsp_prod_pvt = ++i;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
        if (notify)
-               notify_remote_via_irq(vif->irq);
+               notify_remote_via_irq(vif->tx_irq);
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
@@ -1939,10 +1947,6 @@ static int __init netback_init(void)
 failed_init:
        while (--group >= 0) {
                struct xen_netbk *netbk = &xen_netbk[group];
-               for (i = 0; i < MAX_PENDING_REQS; i++) {
-                       if (netbk->mmap_pages[i])
-                               __free_page(netbk->mmap_pages[i]);
-               }
                del_timer(&netbk->net_timer);
                kthread_stop(netbk->task);
        }
@@ -1953,5 +1957,25 @@ failed_init:
 
 module_init(netback_init);
 
+static void __exit netback_fini(void)
+{
+       int i, j;
+
+       xenvif_xenbus_fini();
+
+       for (i = 0; i < xen_netbk_group_nr; i++) {
+               struct xen_netbk *netbk = &xen_netbk[i];
+               del_timer_sync(&netbk->net_timer);
+               kthread_stop(netbk->task);
+               for (j = 0; j < MAX_PENDING_REQS; j++) {
+                       if (netbk->mmap_pages[j])
+                               __free_page(netbk->mmap_pages[j]);
+               }
+       }
+
+       vfree(xen_netbk);
+}
+module_exit(netback_fini);
+
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("xen-backend:vif");
index 410018c4c52818f0b3a697d64e4300a505825f91..04bd860d16a9c29afb6458906529d3f38aa6d72f 100644 (file)
@@ -122,6 +122,16 @@ static int netback_probe(struct xenbus_device *dev,
                goto fail;
        }
 
+       /*
+        * Split event channels support; this is optional, so it is not
+        * put inside the above loop.
+        */
+       err = xenbus_printf(XBT_NIL, dev->nodename,
+                           "feature-split-event-channels",
+                           "%u", separate_tx_rx_irq);
+       if (err)
+               pr_debug("Error writing feature-split-event-channels\n");
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;
@@ -393,21 +403,36 @@ static int connect_rings(struct backend_info *be)
        struct xenvif *vif = be->vif;
        struct xenbus_device *dev = be->dev;
        unsigned long tx_ring_ref, rx_ring_ref;
-       unsigned int evtchn, rx_copy;
+       unsigned int tx_evtchn, rx_evtchn, rx_copy;
        int err;
        int val;
 
        err = xenbus_gather(XBT_NIL, dev->otherend,
                            "tx-ring-ref", "%lu", &tx_ring_ref,
-                           "rx-ring-ref", "%lu", &rx_ring_ref,
-                           "event-channel", "%u", &evtchn, NULL);
+                           "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
        if (err) {
                xenbus_dev_fatal(dev, err,
-                                "reading %s/ring-ref and event-channel",
+                                "reading %s/ring-ref",
                                 dev->otherend);
                return err;
        }
 
+       /* Try split event channels first, then single event channel. */
+       err = xenbus_gather(XBT_NIL, dev->otherend,
+                           "event-channel-tx", "%u", &tx_evtchn,
+                           "event-channel-rx", "%u", &rx_evtchn, NULL);
+       if (err < 0) {
+               err = xenbus_scanf(XBT_NIL, dev->otherend,
+                                  "event-channel", "%u", &tx_evtchn);
+               if (err < 0) {
+                       xenbus_dev_fatal(dev, err,
+                                        "reading %s/event-channel(-tx/rx)",
+                                        dev->otherend);
+                       return err;
+               }
+               rx_evtchn = tx_evtchn;
+       }
+
        err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
                           &rx_copy);
        if (err == -ENOENT) {
@@ -454,11 +479,13 @@ static int connect_rings(struct backend_info *be)
        vif->csum = !val;
 
        /* Map the shared frame, irq etc. */
-       err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn);
+       err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
+                            tx_evtchn, rx_evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err,
-                                "mapping shared-frames %lu/%lu port %u",
-                                tx_ring_ref, rx_ring_ref, evtchn);
+                                "mapping shared-frames %lu/%lu port tx %u rx %u",
+                                tx_ring_ref, rx_ring_ref,
+                                tx_evtchn, rx_evtchn);
                return err;
        }
        return 0;
@@ -485,3 +512,8 @@ int xenvif_xenbus_init(void)
 {
        return xenbus_register_backend(&netback_driver);
 }
+
+void xenvif_xenbus_fini(void)
+{
+       return xenbus_unregister_driver(&netback_driver);
+}
index 1db101415069726fc59d26443f773fc0432f52a6..76a22365d4e9ebfecdb20ea8270ca3760c07c36c 100644 (file)
@@ -85,7 +85,15 @@ struct netfront_info {
 
        struct napi_struct napi;
 
-       unsigned int evtchn;
+       /* Split event channels support; tx_* == rx_* when using a
+        * single event channel.
+        */
+       unsigned int tx_evtchn, rx_evtchn;
+       unsigned int tx_irq, rx_irq;
+       /* Only used when split event channels support is enabled */
+       char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+       char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+
        struct xenbus_device *xbdev;
 
        spinlock_t   tx_lock;
@@ -330,7 +338,7 @@ no_skb:
  push:
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
        if (notify)
-               notify_remote_via_irq(np->netdev->irq);
+               notify_remote_via_irq(np->rx_irq);
 }
 
 static int xennet_open(struct net_device *dev)
@@ -623,7 +631,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
        if (notify)
-               notify_remote_via_irq(np->netdev->irq);
+               notify_remote_via_irq(np->tx_irq);
 
        u64_stats_update_begin(&stats->syncp);
        stats->tx_bytes += skb->len;
@@ -850,7 +858,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
        struct iphdr *iph;
-       unsigned char *th;
        int err = -EPROTO;
        int recalculate_partial_csum = 0;
 
@@ -875,27 +882,27 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
                goto out;
 
        iph = (void *)skb->data;
-       th = skb->data + 4 * iph->ihl;
-       if (th >= skb_tail_pointer(skb))
-               goto out;
 
-       skb->csum_start = th - skb->head;
        switch (iph->protocol) {
        case IPPROTO_TCP:
-               skb->csum_offset = offsetof(struct tcphdr, check);
+               if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+                                         offsetof(struct tcphdr, check)))
+                       goto out;
 
                if (recalculate_partial_csum) {
-                       struct tcphdr *tcph = (struct tcphdr *)th;
+                       struct tcphdr *tcph = tcp_hdr(skb);
                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                         skb->len - iph->ihl*4,
                                                         IPPROTO_TCP, 0);
                }
                break;
        case IPPROTO_UDP:
-               skb->csum_offset = offsetof(struct udphdr, check);
+               if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+                                         offsetof(struct udphdr, check)))
+                       goto out;
 
                if (recalculate_partial_csum) {
-                       struct udphdr *udph = (struct udphdr *)th;
+                       struct udphdr *udph = udp_hdr(skb);
                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                         skb->len - iph->ihl*4,
                                                         IPPROTO_UDP, 0);
@@ -909,9 +916,6 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
                goto out;
        }
 
-       if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
-               goto out;
-
        err = 0;
 
 out:
@@ -1254,23 +1258,35 @@ static int xennet_set_features(struct net_device *dev,
        return 0;
 }
 
-static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       struct netfront_info *np = netdev_priv(dev);
+       struct netfront_info *np = dev_id;
+       struct net_device *dev = np->netdev;
        unsigned long flags;
 
        spin_lock_irqsave(&np->tx_lock, flags);
+       xennet_tx_buf_gc(dev);
+       spin_unlock_irqrestore(&np->tx_lock, flags);
 
-       if (likely(netif_carrier_ok(dev))) {
-               xennet_tx_buf_gc(dev);
-               /* Under tx_lock: protects access to rx shared-ring indexes. */
-               if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+       struct netfront_info *np = dev_id;
+       struct net_device *dev = np->netdev;
+
+       if (likely(netif_carrier_ok(dev) &&
+                  RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
                        napi_schedule(&np->napi);
-       }
 
-       spin_unlock_irqrestore(&np->tx_lock, flags);
+       return IRQ_HANDLED;
+}
 
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+       xennet_tx_interrupt(irq, dev_id);
+       xennet_rx_interrupt(irq, dev_id);
        return IRQ_HANDLED;
 }
 
@@ -1451,9 +1467,14 @@ static void xennet_disconnect_backend(struct netfront_info *info)
        spin_unlock_irq(&info->tx_lock);
        spin_unlock_bh(&info->rx_lock);
 
-       if (info->netdev->irq)
-               unbind_from_irqhandler(info->netdev->irq, info->netdev);
-       info->evtchn = info->netdev->irq = 0;
+       if (info->tx_irq && (info->tx_irq == info->rx_irq))
+               unbind_from_irqhandler(info->tx_irq, info);
+       if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
+               unbind_from_irqhandler(info->tx_irq, info);
+               unbind_from_irqhandler(info->rx_irq, info);
+       }
+       info->tx_evtchn = info->rx_evtchn = 0;
+       info->tx_irq = info->rx_irq = 0;
 
        /* End access and free the pages */
        xennet_end_access(info->tx_ring_ref, info->tx.sring);
@@ -1503,12 +1524,82 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
        return 0;
 }
 
+static int setup_netfront_single(struct netfront_info *info)
+{
+       int err;
+
+       err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+       if (err < 0)
+               goto fail;
+
+       err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+                                       xennet_interrupt,
+                                       0, info->netdev->name, info);
+       if (err < 0)
+               goto bind_fail;
+       info->rx_evtchn = info->tx_evtchn;
+       info->rx_irq = info->tx_irq = err;
+
+       return 0;
+
+bind_fail:
+       xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
+       info->tx_evtchn = 0;
+fail:
+       return err;
+}
+
+static int setup_netfront_split(struct netfront_info *info)
+{
+       int err;
+
+       err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+       if (err < 0)
+               goto fail;
+       err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+       if (err < 0)
+               goto alloc_rx_evtchn_fail;
+
+       snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
+                "%s-tx", info->netdev->name);
+       err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+                                       xennet_tx_interrupt,
+                                       0, info->tx_irq_name, info);
+       if (err < 0)
+               goto bind_tx_fail;
+       info->tx_irq = err;
+
+       snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
+                "%s-rx", info->netdev->name);
+       err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+                                       xennet_rx_interrupt,
+                                       0, info->rx_irq_name, info);
+       if (err < 0)
+               goto bind_rx_fail;
+       info->rx_irq = err;
+
+       return 0;
+
+bind_rx_fail:
+       unbind_from_irqhandler(info->tx_irq, info);
+       info->tx_irq = 0;
+bind_tx_fail:
+       xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
+       info->rx_evtchn = 0;
+alloc_rx_evtchn_fail:
+       xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
+       info->tx_evtchn = 0;
+fail:
+       return err;
+}
+
 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 {
        struct xen_netif_tx_sring *txs;
        struct xen_netif_rx_sring *rxs;
        int err;
        struct net_device *netdev = info->netdev;
+       unsigned int feature_split_evtchn;
 
        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
@@ -1516,6 +1607,12 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
        info->tx.sring = NULL;
        netdev->irq = 0;
 
+       err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                          "feature-split-event-channels", "%u",
+                          &feature_split_evtchn);
+       if (err < 0)
+               feature_split_evtchn = 0;
+
        err = xen_net_read_mac(dev, netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
@@ -1532,40 +1629,50 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
 
        err = xenbus_grant_ring(dev, virt_to_mfn(txs));
-       if (err < 0) {
-               free_page((unsigned long)txs);
-               goto fail;
-       }
+       if (err < 0)
+               goto grant_tx_ring_fail;
 
        info->tx_ring_ref = err;
        rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
        if (!rxs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating rx ring page");
-               goto fail;
+               goto alloc_rx_ring_fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
 
        err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
-       if (err < 0) {
-               free_page((unsigned long)rxs);
-               goto fail;
-       }
+       if (err < 0)
+               goto grant_rx_ring_fail;
        info->rx_ring_ref = err;
 
-       err = xenbus_alloc_evtchn(dev, &info->evtchn);
+       if (feature_split_evtchn)
+               err = setup_netfront_split(info);
+       /* Set up a single event channel if
+        *  a) feature-split-event-channels == 0
+        *  b) feature-split-event-channels == 1 but the split setup failed
+        */
+       if (!feature_split_evtchn || (feature_split_evtchn && err))
+               err = setup_netfront_single(info);
+
        if (err)
-               goto fail;
+               goto alloc_evtchn_fail;
 
-       err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
-                                       0, netdev->name, netdev);
-       if (err < 0)
-               goto fail;
-       netdev->irq = err;
        return 0;
 
- fail:
+       /* If we fail to set up netfront, it is safe to just revoke access to
+        * the granted pages because the backend is not accessing them yet.
+        */
+alloc_evtchn_fail:
+       gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
+grant_rx_ring_fail:
+       free_page((unsigned long)rxs);
+alloc_rx_ring_fail:
+       gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
+grant_tx_ring_fail:
+       free_page((unsigned long)txs);
+fail:
        return err;
 }
 
@@ -1601,11 +1708,27 @@ again:
                message = "writing rx ring-ref";
                goto abort_transaction;
        }
-       err = xenbus_printf(xbt, dev->nodename,
-                           "event-channel", "%u", info->evtchn);
-       if (err) {
-               message = "writing event-channel";
-               goto abort_transaction;
+
+       if (info->tx_evtchn == info->rx_evtchn) {
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "event-channel", "%u", info->tx_evtchn);
+               if (err) {
+                       message = "writing event-channel";
+                       goto abort_transaction;
+               }
+       } else {
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "event-channel-tx", "%u", info->tx_evtchn);
+               if (err) {
+                       message = "writing event-channel-tx";
+                       goto abort_transaction;
+               }
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "event-channel-rx", "%u", info->rx_evtchn);
+               if (err) {
+                       message = "writing event-channel-rx";
+                       goto abort_transaction;
+               }
        }
 
        err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
@@ -1718,7 +1841,9 @@ static int xennet_connect(struct net_device *dev)
         * packets.
         */
        netif_carrier_on(np->netdev);
-       notify_remote_via_irq(np->netdev->irq);
+       notify_remote_via_irq(np->tx_irq);
+       if (np->tx_irq != np->rx_irq)
+               notify_remote_via_irq(np->rx_irq);
        xennet_tx_buf_gc(dev);
        xennet_alloc_rx_buffers(dev);
 
index f802e7c923561d2bd331402b192c99aac6da9ff1..2dacd19e1b8a15c5a7c8d66db2eb7cb2793a58ed 100644 (file)
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
  */
 void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
 {
-       if (mw > NTB_NUM_MW)
+       if (mw >= NTB_NUM_MW)
                return NULL;
 
        return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
  */
 resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
 {
-       if (mw > NTB_NUM_MW)
+       if (mw >= NTB_NUM_MW)
                return 0;
 
        return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
  */
 void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
 {
-       if (mw > NTB_NUM_MW)
+       if (mw >= NTB_NUM_MW)
                return;
 
        dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                ndev->mw[i].vbase =
                    ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
                               ndev->mw[i].bar_sz);
-               dev_info(&pdev->dev, "MW %d size %d\n", i,
-                        (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+               dev_info(&pdev->dev, "MW %d size %llu\n", i,
+                        pci_resource_len(pdev, MW_TO_BAR(i)));
                if (!ndev->mw[i].vbase) {
                        dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
                                 MW_TO_BAR(i));
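
The three ntb_hw.c hunks above fix the same off-by-one: for an array with NTB_NUM_MW entries the last valid index is NTB_NUM_MW - 1, so the bounds check must reject mw >= NTB_NUM_MW, not only mw > NTB_NUM_MW. A tiny userspace illustration (NUM_MW and mw_size are made up):

#include <stdio.h>

#define NUM_MW 2

static int mw_size[NUM_MW] = { 4096, 8192 };

static int get_mw_size(unsigned int mw)
{
        if (mw >= NUM_MW)       /* ">" would let mw == NUM_MW through and */
                return 0;       /* read one element past the end of the array */
        return mw_size[mw];
}

int main(void)
{
        /* prints "4096 8192 0": index 2 is correctly rejected */
        printf("%d %d %d\n", get_mw_size(0), get_mw_size(1), get_mw_size(2));
        return 0;
}
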
index e0bdfd7f9930aab4b42984b468ff87fac2868f44..f8d7081ee3014322e90c767fd766456f5410d311 100644 (file)
@@ -58,7 +58,7 @@
 #include <linux/ntb.h>
 #include "ntb_hw.h"
 
-#define NTB_TRANSPORT_VERSION  2
+#define NTB_TRANSPORT_VERSION  3
 
 static unsigned int transport_mtu = 0x401E;
 module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
 
 enum {
        VERSION = 0,
-       MW0_SZ,
-       MW1_SZ,
-       NUM_QPS,
        QP_LINKS,
+       NUM_QPS,
+       NUM_MWS,
+       MW0_SZ_HIGH,
+       MW0_SZ_LOW,
+       MW1_SZ_HIGH,
+       MW1_SZ_LOW,
        MAX_SPAD,
 };
 
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
 {
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport *nt;
-       int rc;
+       int rc, i = 0;
 
        if (list_empty(&ntb_transport_list))
                return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
                dev = &client_dev->dev;
 
                /* setup and register client devices */
-               dev_set_name(dev, "%s", device_name);
+               dev_set_name(dev, "%s%d", device_name, i);
                dev->bus = &ntb_bus_type;
                dev->release = ntb_client_release;
                dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
                }
 
                list_add_tail(&client_dev->entry, &nt->client_devs);
+               i++;
        }
 
        return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
                             (qp_num / NTB_NUM_MW * rx_size);
        rx_size -= sizeof(struct ntb_rx_info);
 
-       qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
-       qp->rx_max_frame = min(transport_mtu, rx_size);
+       qp->rx_buff = qp->remote_rx_info + 1;
+       /* Due to housekeeping, there must be at least 2 buffers */
+       qp->rx_max_frame = min(transport_mtu, rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;
        qp->rx_index = 0;
 
-       qp->remote_rx_info->entry = qp->rx_max_entry;
+       qp->remote_rx_info->entry = qp->rx_max_entry - 1;
 
        /* setup the hdr offsets with 0's */
        for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
+       qp->tx_index = 0;
+}
+
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+       struct ntb_transport_mw *mw = &nt->mw[num_mw];
+       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+       if (!mw->virt_addr)
+               return;
+
+       dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+       mw->virt_addr = NULL;
 }
 
 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
        struct ntb_transport_mw *mw = &nt->mw[num_mw];
        struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
+       /* No need to re-setup */
+       if (mw->size == ALIGN(size, 4096))
+               return 0;
+
+       if (mw->size != 0)
+               ntb_free_mw(nt, num_mw);
+
        /* Alloc memory for receiving data.  Must be 4k aligned */
        mw->size = ALIGN(size, 4096);
 
        mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
                                           GFP_KERNEL);
        if (!mw->virt_addr) {
+               mw->size = 0;
                dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
                       (int) mw->size);
                return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
        u32 val;
        int rc, i;
 
-       /* send the local info */
-       rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       0, VERSION);
-               goto out;
-       }
+       /* send the local info, in the reverse of the order in which we read it */
+       for (i = 0; i < NTB_NUM_MW; i++) {
+               rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
+                                          ntb_get_mw_size(ndev, i) >> 32);
+               if (rc) {
+                       dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+                               (u32)(ntb_get_mw_size(ndev, i) >> 32),
+                               MW0_SZ_HIGH + (i * 2));
+                       goto out;
+               }
 
-       rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
-               goto out;
+               rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
+                                          (u32) ntb_get_mw_size(ndev, i));
+               if (rc) {
+                       dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+                               (u32) ntb_get_mw_size(ndev, i),
+                               MW0_SZ_LOW + (i * 2));
+                       goto out;
+               }
        }
 
-       rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+       rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+                       NTB_NUM_MW, NUM_MWS);
                goto out;
        }
 
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
                goto out;
        }
 
-       rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
-               goto out;
-       }
-
-       rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+       rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       val, QP_LINKS);
+                       NTB_TRANSPORT_VERSION, VERSION);
                goto out;
        }
 
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
                goto out;
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
 
-       rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+       rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
        if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
                goto out;
        }
 
-       if (!val)
+       if (val != NTB_NUM_MW)
                goto out;
-       dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+       dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
 
-       rc = ntb_set_mw(nt, 0, val);
-       if (rc)
-               goto out;
+       for (i = 0; i < NTB_NUM_MW; i++) {
+               u64 val64;
 
-       rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
-               goto out;
-       }
+               rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
+               if (rc) {
+                       dev_err(&pdev->dev, "Error reading remote spad %d\n",
+                               MW0_SZ_HIGH + (i * 2));
+                       goto out1;
+               }
 
-       if (!val)
-               goto out;
-       dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+               val64 = (u64) val << 32;
 
-       rc = ntb_set_mw(nt, 1, val);
-       if (rc)
-               goto out;
+               rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
+               if (rc) {
+                       dev_err(&pdev->dev, "Error reading remote spad %d\n",
+                               MW0_SZ_LOW + (i * 2));
+                       goto out1;
+               }
+
+               val64 |= val;
+
+               dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+
+               rc = ntb_set_mw(nt, i, val64);
+               if (rc)
+                       goto out1;
+       }
 
        nt->transport_link = NTB_LINK_UP;
 
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
 
        return;
 
+out1:
+       for (i = 0; i < NTB_NUM_MW; i++)
+               ntb_free_mw(nt, i);
 out:
        if (ntb_hw_link_status(ndev))
                schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
                      (qp_num / NTB_NUM_MW * tx_size);
        tx_size -= sizeof(struct ntb_rx_info);
 
-       qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
-       qp->tx_max_frame = min(transport_mtu, tx_size);
+       qp->tx_mw = qp->rx_info + 1;
+       /* Due to housekeeping, there must be at least 2 buffers */
+       qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
-       qp->tx_index = 0;
 
        if (nt->debugfs_dir) {
                char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
        pdev = ntb_query_pdev(nt->ndev);
 
        for (i = 0; i < NTB_NUM_MW; i++)
-               if (nt->mw[i].virt_addr)
-                       dma_free_coherent(&pdev->dev, nt->mw[i].size,
-                                         nt->mw[i].virt_addr,
-                                         nt->mw[i].dma_addr);
+               ntb_free_mw(nt, i);
 
        kfree(nt->qps);
        ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
 static void ntb_transport_rx(unsigned long data)
 {
        struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
-       int rc;
+       int rc, i;
 
-       do {
+       /* Limit the number of packets processed in a single interrupt to
+        * provide fairness to others
+        */
+       for (i = 0; i < qp->rx_max_entry; i++) {
                rc = ntb_process_rxc(qp);
-       } while (!rc);
+               if (rc)
+                       break;
+       }
 }
 
 static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
 
        if (!qp)
                return;
 
+       pdev = ntb_query_pdev(qp->ndev);
+
        cancel_delayed_work_sync(&qp->link_work);
 
        ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
  */
 void ntb_transport_link_down(struct ntb_transport_qp *qp)
 {
-       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct pci_dev *pdev;
        int rc, val;
 
        if (!qp)
                return;
 
+       pdev = ntb_query_pdev(qp->ndev);
        qp->client_ready = NTB_LINK_DOWN;
 
        rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
  */
 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
 {
+       if (!qp)
+               return false;
+
        return qp->qp_link == NTB_LINK_UP;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
  */
 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
 {
+       if (!qp)
+               return 0;
+
        return qp->qp_num;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  */
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
+       if (!qp)
+               return 0;
+
        return qp->tx_max_frame - sizeof(struct ntb_payload_header);
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
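
Because memory-window sizes no longer fit in a 32-bit scratchpad value, ntb_transport_link_work() above now exchanges each size through a MW*_SZ_HIGH/MW*_SZ_LOW register pair. The packing and unpacking is a plain shift-and-or; a small userspace sketch (the size value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t size = 0x123456789ULL;         /* wider than 32 bits */

        /* writer side: split into two 32-bit scratchpad values */
        uint32_t high = (uint32_t)(size >> 32);
        uint32_t low  = (uint32_t)size;

        /* reader side: recombine exactly as the link-work handler does */
        uint64_t recombined = ((uint64_t)high << 32) | low;

        printf("0x%llx -> high 0x%x, low 0x%x -> 0x%llx\n",
               (unsigned long long)size, (unsigned)high, (unsigned)low,
               (unsigned long long)recombined);
        return 0;
}
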
index c76d16c972cc6cfaf6ce08fd56ebe89020508032..a6f584a7f4a13f2842663c98d807e9b38b2cf89f 100644 (file)
@@ -192,14 +192,15 @@ EXPORT_SYMBOL(of_find_property);
 struct device_node *of_find_all_nodes(struct device_node *prev)
 {
        struct device_node *np;
+       unsigned long flags;
 
-       raw_spin_lock(&devtree_lock);
+       raw_spin_lock_irqsave(&devtree_lock, flags);
        np = prev ? prev->allnext : of_allnodes;
        for (; np != NULL; np = np->allnext)
                if (of_node_get(np))
                        break;
        of_node_put(prev);
-       raw_spin_unlock(&devtree_lock);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
        return np;
 }
 EXPORT_SYMBOL(of_find_all_nodes);
@@ -421,8 +422,9 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
        struct device_node *prev)
 {
        struct device_node *next;
+       unsigned long flags;
 
-       raw_spin_lock(&devtree_lock);
+       raw_spin_lock_irqsave(&devtree_lock, flags);
        next = prev ? prev->sibling : node->child;
        for (; next; next = next->sibling) {
                if (!__of_device_is_available(next))
@@ -431,7 +433,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
                        break;
        }
        of_node_put(prev);
-       raw_spin_unlock(&devtree_lock);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
        return next;
 }
 EXPORT_SYMBOL(of_get_next_available_child);
@@ -735,13 +737,14 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
 struct device_node *of_find_node_by_phandle(phandle handle)
 {
        struct device_node *np;
+       unsigned long flags;
 
-       raw_spin_lock(&devtree_lock);
+       raw_spin_lock_irqsave(&devtree_lock, flags);
        for (np = of_allnodes; np; np = np->allnext)
                if (np->phandle == handle)
                        break;
        of_node_get(np);
-       raw_spin_unlock(&devtree_lock);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
        return np;
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);
@@ -1208,11 +1211,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
                                out_args->args_count = count;
                                for (i = 0; i < count; i++)
                                        out_args->args[i] = be32_to_cpup(list++);
+                       } else {
+                               of_node_put(node);
                        }
 
                        /* Found it! return success */
-                       if (node)
-                               of_node_put(node);
                        return 0;
                }
 
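
The drivers/of/base.c hunks above convert plain raw_spin_lock()/raw_spin_unlock() on devtree_lock to the _irqsave/_irqrestore variants, the usual pattern when a lock may be taken in contexts where interrupts are, or must be, disabled: the caller's interrupt state is saved in a local flags word and restored afterwards. A hedged sketch of the pattern with a hypothetical lock:

static DEFINE_RAW_SPINLOCK(my_lock);

static void my_walk(void)
{
        unsigned long flags;

        /* save the current IRQ state, disable IRQs and take the lock */
        raw_spin_lock_irqsave(&my_lock, flags);
        /* ... walk or modify the data protected by my_lock here ... */
        raw_spin_unlock_irqrestore(&my_lock, flags);
        /* IRQ state is now exactly what the caller had before */
}
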
index ffab033d207e199113e4772e345c7968fc027744..ea174c8ee34b56502860f78f94ba61df48fe764e 100644 (file)
@@ -22,6 +22,7 @@ static const char *phy_modes[] = {
        [PHY_INTERFACE_MODE_GMII]       = "gmii",
        [PHY_INTERFACE_MODE_SGMII]      = "sgmii",
        [PHY_INTERFACE_MODE_TBI]        = "tbi",
+       [PHY_INTERFACE_MODE_REVMII]     = "rev-mii",
        [PHY_INTERFACE_MODE_RMII]       = "rmii",
        [PHY_INTERFACE_MODE_RGMII]      = "rgmii",
        [PHY_INTERFACE_MODE_RGMII_ID]   = "rgmii-id",
index 2ef7103270bb7eb250ae15e16650c17a46a643d7..1f05913ae677e2724c78729a324a01ac1d2afbbd 100644 (file)
@@ -668,7 +668,7 @@ lba_fixup_bus(struct pci_bus *bus)
                        BUG();
                }
 
-               if (ldev->hba.elmmio_space.start) {
+               if (ldev->hba.elmmio_space.flags) {
                        err = request_resource(&iomem_resource,
                                        &(ldev->hba.elmmio_space));
                        if (err < 0) {
@@ -993,7 +993,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
 
                case PAT_LMMIO:
                        /* used to fix up pre-initialized MEM BARs */
-                       if (!lba_dev->hba.lmmio_space.start) {
+                       if (!lba_dev->hba.lmmio_space.flags) {
                                sprintf(lba_dev->hba.lmmio_name,
                                                "PCI%02x LMMIO",
                                                (int)lba_dev->hba.bus_num.start);
@@ -1001,7 +1001,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
                                        io->start;
                                r = &lba_dev->hba.lmmio_space;
                                r->name = lba_dev->hba.lmmio_name;
-                       } else if (!lba_dev->hba.elmmio_space.start) {
+                       } else if (!lba_dev->hba.elmmio_space.flags) {
                                sprintf(lba_dev->hba.elmmio_name,
                                                "PCI%02x ELMMIO",
                                                (int)lba_dev->hba.bus_num.start);
@@ -1096,6 +1096,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
        r->name = "LBA PCI Busses";
        r->start = lba_num & 0xff;
        r->end = (lba_num>>8) & 0xff;
+       r->flags = IORESOURCE_BUS;
 
        /* Set up local PCI Bus resources - we don't need them for
        ** Legacy boxes but it's nice to see in /proc/iomem.
@@ -1494,7 +1495,7 @@ lba_driver_probe(struct parisc_device *dev)
 
        pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
                                HBA_PORT_BASE(lba_dev->hba.hba_num));
-       if (lba_dev->hba.elmmio_space.start)
+       if (lba_dev->hba.elmmio_space.flags)
                pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
                                        lba_dev->hba.lmmio_space_offset);
        if (lba_dev->hba.lmmio_space.flags)
index ac6e8e7a02df079222725fc35074045f249719b2..a042d065a0c757bdee271718808c349c6a07898e 100644 (file)
@@ -494,15 +494,4 @@ static struct pci_driver superio_driver = {
        .probe =        superio_probe,
 };
 
-static int __init superio_modinit(void)
-{
-       return pci_register_driver(&superio_driver);
-}
-
-static void __exit superio_exit(void)
-{
-       pci_unregister_driver(&superio_driver);
-}
-
-module_init(superio_modinit);
-module_exit(superio_exit);
+module_pci_driver(superio_driver);
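
module_pci_driver() is a convenience macro that generates the init/exit boilerplate removed above; for this driver it expands to roughly the following (the generated symbol names differ in practice):

static int __init superio_driver_init(void)
{
        return pci_register_driver(&superio_driver);
}
module_init(superio_driver_init);

static void __exit superio_driver_exit(void)
{
        pci_unregister_driver(&superio_driver);
}
module_exit(superio_driver_exit);
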
index 24e12d4d17699ab41601676ccb1f342b9cfdbc87..a50576081b34dd998fbbd8aca4fd0c154df5420e 100644 (file)
@@ -71,7 +71,7 @@ config PARPORT_PC_FIFO
 
 config PARPORT_PC_SUPERIO
        bool "SuperIO chipset support"
-       depends on PARPORT_PC
+       depends on PARPORT_PC && !PARISC
        help
          Saying Y here enables some probes for Super-IO chipsets in order to
          find out things like base addresses, IRQ lines and DMA channels.  It
index a5251cb5fb0c616f4254c025bbd77d5925014a2f..6e3a60c788736a1e01c6d56377a4222d5e880e4a 100644 (file)
@@ -234,7 +234,7 @@ static int parport_PS2_supported(struct parport *pb)
 
 struct parport *parport_gsc_probe_port(unsigned long base,
                                       unsigned long base_hi, int irq,
-                                      int dma, struct pci_dev *dev)
+                                      int dma, struct parisc_device *padev)
 {
        struct parport_gsc_private *priv;
        struct parport_operations *ops;
@@ -258,7 +258,6 @@ struct parport *parport_gsc_probe_port(unsigned long base,
        priv->ctr_writable = 0xff;
        priv->dma_buf = 0;
        priv->dma_handle = 0;
-       priv->dev = dev;
        p->base = base;
        p->base_hi = base_hi;
        p->irq = irq;
@@ -282,6 +281,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
                return NULL;
        }
 
+       p->dev = &padev->dev;
        p->base_hi = base_hi;
        p->modes = tmp.modes;
        p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
@@ -373,7 +373,7 @@ static int parport_init_chip(struct parisc_device *dev)
        }
        
        p = parport_gsc_probe_port(port, 0, dev->irq,
-                       /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
+                       /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev);
        if (p)
                parport_count++;
        dev_set_drvdata(&dev->dev, p);
index fc9c37c5402222db388e294f50092387f6de5bb6..812214768d27e511b42e18c24d7048a8df57c4c2 100644 (file)
@@ -217,6 +217,6 @@ extern void parport_gsc_dec_use_count(void);
 extern struct parport *parport_gsc_probe_port(unsigned long base,
                                                unsigned long base_hi,
                                                int irq, int dma,
-                                               struct pci_dev *dev);
+                                               struct parisc_device *padev);
 
 #endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */
index 96fed19c6d90358833e37d1e9f55b1899b2d11b8..716aa93fff76437ab0038de385d7c4fa7ec04834 100644 (file)
@@ -950,6 +950,20 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK ;
 }
 
+void acpiphp_check_host_bridge(acpi_handle handle)
+{
+       struct acpiphp_bridge *bridge;
+
+       bridge = acpiphp_handle_to_bridge(handle);
+       if (bridge) {
+               acpiphp_check_bridge(bridge);
+               put_bridge(bridge);
+       }
+
+       acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+               ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+}
+
 static void _handle_hotplug_event_bridge(struct work_struct *work)
 {
        struct acpiphp_bridge *bridge;
index 8ec8b4f485604e384de9bf7eddfe2c97c4cae5d5..0f4554e48cc5f52850d766078dff76f8c1cdc4d0 100644 (file)
@@ -580,6 +580,7 @@ struct aer_recover_entry
        u8      devfn;
        u16     domain;
        int     severity;
+       struct aer_capability_regs *regs;
 };
 
 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
@@ -593,7 +594,7 @@ static DEFINE_SPINLOCK(aer_recover_ring_lock);
 static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
 
 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
-                      int severity)
+                      int severity, struct aer_capability_regs *aer_regs)
 {
        unsigned long flags;
        struct aer_recover_entry entry = {
@@ -601,6 +602,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
                .devfn          = devfn,
                .domain         = domain,
                .severity       = severity,
+               .regs           = aer_regs,
        };
 
        spin_lock_irqsave(&aer_recover_ring_lock, flags);
@@ -627,6 +629,7 @@ static void aer_recover_work_func(struct work_struct *work)
                               PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
                        continue;
                }
+               cper_print_aer(pdev, entry.severity, entry.regs);
                do_recovery(pdev, entry.severity);
                pci_dev_put(pdev);
        }
index 5ab14251839d0f06c395ac0dfc064c166cf3b75a..2c7c9f5f592caa4253b862775bfbb2ee3854af7d 100644 (file)
@@ -220,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
 }
 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
 
-void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int cper_severity,
                    struct aer_capability_regs *aer)
 {
        int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -244,7 +244,7 @@ void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
        agent = AER_GET_AGENT(aer_severity, status);
        dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
               status, mask);
-       cper_print_bits(prefix, status, status_strs, status_strs_size);
+       cper_print_bits("", status, status_strs, status_strs_size);
        dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
               aer_error_layer[layer], aer_agent_string[agent]);
        if (aer_severity != AER_CORRECTABLE)
index c67c37e23dd77846d2fd3bbd18bf102349064b57..694c3ace45204c11eeed4518f82b901c92489b59 100644 (file)
@@ -610,7 +610,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
        bool found = false;
        unsigned long config;
 
-       mutex_lock(&pctldev->mutex);
+       mutex_lock(&pinctrl_maps_mutex);
 
        /* Parse the pinctrl map and look for the elected pin/state */
        for_each_maps(maps_node, i, map) {
@@ -659,7 +659,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
                confops->pin_config_config_dbg_show(pctldev, s, config);
 
 exit:
-       mutex_unlock(&pctldev->mutex);
+       mutex_unlock(&pinctrl_maps_mutex);
 
        return 0;
 }
index aa17f7580f617b05d7469c372656922f229a9803..6d4532702f809b5cc52629fbdc7ad238412153f3 100644 (file)
@@ -851,23 +851,12 @@ static int abx500_gpio_probe(struct platform_device *pdev)
 
        if (abx500_pdata)
                pdata = abx500_pdata->gpio;
-       if (!pdata) {
-               if (np) {
-                       const struct of_device_id *match;
 
-                       match = of_match_device(abx500_gpio_match, &pdev->dev);
-                       if (!match)
-                               return -ENODEV;
-                       id = (unsigned long)match->data;
-               } else {
-                       dev_err(&pdev->dev, "gpio dt and platform data missing\n");
-                       return -ENODEV;
-               }
+       if (!(pdata || np)) {
+               dev_err(&pdev->dev, "gpio dt and platform data missing\n");
+               return -ENODEV;
        }
 
-       if (platid)
-               id = platid->driver_data;
-
        pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
                                   GFP_KERNEL);
        if (pct == NULL) {
@@ -882,6 +871,16 @@ static int abx500_gpio_probe(struct platform_device *pdev)
        pct->chip.dev = &pdev->dev;
        pct->chip.base = (np) ? -1 : pdata->gpio_base;
 
+       if (platid)
+               id = platid->driver_data;
+       else if (np) {
+               const struct of_device_id *match;
+
+               match = of_match_device(abx500_gpio_match, &pdev->dev);
+               if (match)
+                       id = (unsigned long)match->data;
+       }
+
        /* initialize the lock */
        mutex_init(&pct->lock);
 
@@ -900,8 +899,7 @@ static int abx500_gpio_probe(struct platform_device *pdev)
                abx500_pinctrl_ab8505_init(&pct->soc);
                break;
        default:
-               dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n",
-                               (int) platid->driver_data);
+               dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id);
                mutex_destroy(&pct->lock);
                return -EINVAL;
        }
index edde3acc41864b5d9770be7696e553558d428902..d6b41747d687e13c188340ed08a3027c9646245d 100644 (file)
@@ -713,11 +713,6 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
        gpio->dev = &pdev->dev;
 
        memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!memres) {
-               dev_err(gpio->dev, "could not get GPIO memory resource\n");
-               return -ENODEV;
-       }
-
        gpio->base = devm_ioremap_resource(&pdev->dev, memres);
        if (IS_ERR(gpio->base))
                return PTR_ERR(gpio->base);
@@ -835,7 +830,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
        return 0;
 
 err_no_range:
-       err = gpiochip_remove(&gpio->chip);
+       if (gpiochip_remove(&gpio->chip))
+               dev_err(&pdev->dev, "failed to remove gpio chip\n");
 err_no_chip:
 err_no_domain:
 err_no_port:
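Several hunks in this merge (here and in the exynos, samsung, xway and pwm drivers below) drop the explicit NULL check after platform_get_resource(); devm_ioremap_resource() already validates its resource argument and prints its own error, so the probe error handling collapses to the IS_ERR() test. A rough sketch of the resulting idiom, with an illustrative driver name that is not part of this patch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* devm_ioremap_resource() returns an ERR_PTR (and logs an error) for a
	 * missing or unavailable resource, so no separate !res check is needed. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}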
index ac742817ebceeebf1797a27b6f1995931a9df363..2d76f66a2e0b90c3a3ac1cfe9036a42840c15a43 100644 (file)
@@ -196,6 +196,12 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+struct exynos_eint_gpio_save {
+       u32 eint_con;
+       u32 eint_fltcon0;
+       u32 eint_fltcon1;
+};
+
 /*
  * exynos_eint_gpio_init() - setup handling of external gpio interrupts.
  * @d: driver data of samsung pinctrl driver.
@@ -204,8 +210,8 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
 {
        struct samsung_pin_bank *bank;
        struct device *dev = d->dev;
-       unsigned int ret;
-       unsigned int i;
+       int ret;
+       int i;
 
        if (!d->irq) {
                dev_err(dev, "irq number not available\n");
@@ -227,11 +233,29 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
                                bank->nr_pins, &exynos_gpio_irqd_ops, bank);
                if (!bank->irq_domain) {
                        dev_err(dev, "gpio irq domain add failed\n");
-                       return -ENXIO;
+                       ret = -ENXIO;
+                       goto err_domains;
+               }
+
+               bank->soc_priv = devm_kzalloc(d->dev,
+                       sizeof(struct exynos_eint_gpio_save), GFP_KERNEL);
+               if (!bank->soc_priv) {
+                       irq_domain_remove(bank->irq_domain);
+                       ret = -ENOMEM;
+                       goto err_domains;
                }
        }
 
        return 0;
+
+err_domains:
+       for (--i, --bank; i >= 0; --i, --bank) {
+               if (bank->eint_type != EINT_TYPE_GPIO)
+                       continue;
+               irq_domain_remove(bank->irq_domain);
+       }
+
+       return ret;
 }
 
 static void exynos_wkup_irq_unmask(struct irq_data *irqd)
@@ -326,6 +350,28 @@ static int exynos_wkup_irq_set_type(struct irq_data *irqd, unsigned int type)
        return 0;
 }
 
+static u32 exynos_eint_wake_mask = 0xffffffff;
+
+u32 exynos_get_eint_wake_mask(void)
+{
+       return exynos_eint_wake_mask;
+}
+
+static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
+{
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
+
+       pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq);
+
+       if (!on)
+               exynos_eint_wake_mask |= bit;
+       else
+               exynos_eint_wake_mask &= ~bit;
+
+       return 0;
+}
+
 /*
  * irq_chip for wakeup interrupts
  */
@@ -335,6 +381,7 @@ static struct irq_chip exynos_wkup_irq_chip = {
        .irq_mask       = exynos_wkup_irq_mask,
        .irq_ack        = exynos_wkup_irq_ack,
        .irq_set_type   = exynos_wkup_irq_set_type,
+       .irq_set_wake   = exynos_wkup_irq_set_wake,
 };
 
 /* interrupt handler for wakeup interrupts 0..15 */
@@ -505,6 +552,72 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
        return 0;
 }
 
+static void exynos_pinctrl_suspend_bank(
+                               struct samsung_pinctrl_drv_data *drvdata,
+                               struct samsung_pin_bank *bank)
+{
+       struct exynos_eint_gpio_save *save = bank->soc_priv;
+       void __iomem *regs = drvdata->virt_base;
+
+       save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+                                               + bank->eint_offset);
+       save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset);
+       save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset + 4);
+
+       pr_debug("%s: save     con %#010x\n", bank->name, save->eint_con);
+       pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
+       pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+}
+
+static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       struct samsung_pin_bank *bank = ctrl->pin_banks;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+               if (bank->eint_type == EINT_TYPE_GPIO)
+                       exynos_pinctrl_suspend_bank(drvdata, bank);
+}
+
+static void exynos_pinctrl_resume_bank(
+                               struct samsung_pinctrl_drv_data *drvdata,
+                               struct samsung_pin_bank *bank)
+{
+       struct exynos_eint_gpio_save *save = bank->soc_priv;
+       void __iomem *regs = drvdata->virt_base;
+
+       pr_debug("%s:     con %#010x => %#010x\n", bank->name,
+                       readl(regs + EXYNOS_GPIO_ECON_OFFSET
+                       + bank->eint_offset), save->eint_con);
+       pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+                       readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                       + 2 * bank->eint_offset), save->eint_fltcon0);
+       pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+                       readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                       + 2 * bank->eint_offset + 4), save->eint_fltcon1);
+
+       writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+                                               + bank->eint_offset);
+       writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset);
+       writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset + 4);
+}
+
+static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       struct samsung_pin_bank *bank = ctrl->pin_banks;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+               if (bank->eint_type == EINT_TYPE_GPIO)
+                       exynos_pinctrl_resume_bank(drvdata, bank);
+}
+
 /* pin banks of exynos4210 pin-controller 0 */
 static struct samsung_pin_bank exynos4210_pin_banks0[] = {
        EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -568,6 +681,8 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4210-gpio-ctrl0",
        }, {
                /* pin-controller instance 1 data */
@@ -582,6 +697,8 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
                .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4210-gpio-ctrl1",
        }, {
                /* pin-controller instance 2 data */
@@ -663,6 +780,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl0",
        }, {
                /* pin-controller instance 1 data */
@@ -677,6 +796,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
                .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl1",
        }, {
                /* pin-controller instance 2 data */
@@ -687,6 +808,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl2",
        }, {
                /* pin-controller instance 3 data */
@@ -697,6 +820,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl3",
        },
 };
@@ -775,6 +900,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
                .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl0",
        }, {
                /* pin-controller instance 1 data */
@@ -785,6 +912,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl1",
        }, {
                /* pin-controller instance 2 data */
@@ -795,6 +924,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl2",
        }, {
                /* pin-controller instance 3 data */
@@ -805,6 +936,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl3",
        },
 };
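For reference on the wake-mask bookkeeping added in exynos_wkup_irq_set_wake() above: each wake-up bank contributes eight bits starting at 2 * eint_offset, so assuming the usual 0x00/0x04/0x08/0x0C spacing of the gpx wake-up banks (not shown in this hunk), the 32-bit mask covers four banks. A small hypothetical calculation, with illustrative values that are not taken from this patch:

#include <stdio.h>

int main(void)
{
	unsigned int eint_offset = 0x04;	/* hypothetical second wake-up bank */
	unsigned int hwirq = 5;			/* pin 5 within that bank */
	unsigned long bit = 1UL << (2 * eint_offset + hwirq);

	/* 2 * 0x04 + 5 = 13, so this pin owns bit 13 of exynos_eint_wake_mask */
	printf("wake mask bit: %u (mask %#lx)\n", 2 * eint_offset + hwirq, bit);
	return 0;
}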
index 9b1f77a5bf0fd2e071aab24a238a3f0dc5092695..3c91c357792ff0f5f5b7ad9739f934f4337e0a07 100644 (file)
@@ -19,6 +19,7 @@
 
 /* External GPIO and wakeup interrupt related definitions */
 #define EXYNOS_GPIO_ECON_OFFSET                0x700
+#define EXYNOS_GPIO_EFLTCON_OFFSET     0x800
 #define EXYNOS_GPIO_EMASK_OFFSET       0x900
 #define EXYNOS_GPIO_EPEND_OFFSET       0xA00
 #define EXYNOS_WKUP_ECON_OFFSET                0xE00
index 6038503ed929cc5c9332e7f4903412cf2d1d3611..32a48f44f574264f5c1b320e00db8065f1ba0616 100644 (file)
@@ -1000,11 +1000,6 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "cannot find IO resource\n");
-               return -ENOENT;
-       }
-
        priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->reg_base))
                return PTR_ERR(priv->reg_base);
index 615c5002b757e1515187a1a87278a69d9a980d8f..d22ca252b80d41b6b1d10b59f83d9e33e9f9b1ef 100644 (file)
@@ -52,7 +52,8 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
        int i;
 
        for (i = 0; i < num_maps; i++)
-               if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+               if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
+                   map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
                        kfree(map[i].data.configs.configs);
        kfree(map);
 }
index 976366899f68831f6765f1422ce996a1c8dec3b8..63ac22e89678c8bd9833be95b44b7c525afab620 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/gpio.h>
 #include <linux/irqdomain.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 
 #include "core.h"
 #include "pinctrl-samsung.h"
@@ -48,6 +49,9 @@ static struct pin_config {
        { "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN },
 };
 
+/* Global list of devices (struct samsung_pinctrl_drv_data) */
+LIST_HEAD(drvdata_list);
+
 static unsigned int pin_base;
 
 static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc)
@@ -932,11 +936,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
        drvdata->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "cannot find IO resource\n");
-               return -ENOENT;
-       }
-
        drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(drvdata->virt_base))
                return PTR_ERR(drvdata->virt_base);
@@ -961,9 +960,151 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
                ctrl->eint_wkup_init(drvdata);
 
        platform_set_drvdata(pdev, drvdata);
+
+       /* Add to the global list */
+       list_add_tail(&drvdata->node, &drvdata_list);
+
        return 0;
 }
 
+#ifdef CONFIG_PM
+
+/**
+ * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device
+ *
+ * Save data for all banks handled by this device.
+ */
+static void samsung_pinctrl_suspend_dev(
+       struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       void __iomem *virt_base = drvdata->virt_base;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; i++) {
+               struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+               void __iomem *reg = virt_base + bank->pctl_offset;
+
+               u8 *offs = bank->type->reg_offset;
+               u8 *widths = bank->type->fld_width;
+               enum pincfg_type type;
+
+               /* Registers without a powerdown config aren't lost */
+               if (!widths[PINCFG_TYPE_CON_PDN])
+                       continue;
+
+               for (type = 0; type < PINCFG_TYPE_NUM; type++)
+                       if (widths[type])
+                               bank->pm_save[type] = readl(reg + offs[type]);
+
+               if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
+                       /* Some banks have two config registers */
+                       bank->pm_save[PINCFG_TYPE_NUM] =
+                               readl(reg + offs[PINCFG_TYPE_FUNC] + 4);
+                       pr_debug("Save %s @ %p (con %#010x %08x)\n",
+                                bank->name, reg,
+                                bank->pm_save[PINCFG_TYPE_FUNC],
+                                bank->pm_save[PINCFG_TYPE_NUM]);
+               } else {
+                       pr_debug("Save %s @ %p (con %#010x)\n", bank->name,
+                                reg, bank->pm_save[PINCFG_TYPE_FUNC]);
+               }
+       }
+
+       if (ctrl->suspend)
+               ctrl->suspend(drvdata);
+}
+
+/**
+ * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device
+ *
+ * Restore the state of the banks that were saved during suspend.
+ *
+ * We don't bother doing anything complicated to avoid glitching lines since
+ * we're called before pad retention is turned off.
+ */
+static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       void __iomem *virt_base = drvdata->virt_base;
+       int i;
+
+       if (ctrl->resume)
+               ctrl->resume(drvdata);
+
+       for (i = 0; i < ctrl->nr_banks; i++) {
+               struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+               void __iomem *reg = virt_base + bank->pctl_offset;
+
+               u8 *offs = bank->type->reg_offset;
+               u8 *widths = bank->type->fld_width;
+               enum pincfg_type type;
+
+               /* Registers without a powerdown config aren't lost */
+               if (!widths[PINCFG_TYPE_CON_PDN])
+                       continue;
+
+               if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
+                       /* Some banks have two config registers */
+                       pr_debug("%s @ %p (con %#010x %08x => %#010x %08x)\n",
+                                bank->name, reg,
+                                readl(reg + offs[PINCFG_TYPE_FUNC]),
+                                readl(reg + offs[PINCFG_TYPE_FUNC] + 4),
+                                bank->pm_save[PINCFG_TYPE_FUNC],
+                                bank->pm_save[PINCFG_TYPE_NUM]);
+                       writel(bank->pm_save[PINCFG_TYPE_NUM],
+                              reg + offs[PINCFG_TYPE_FUNC] + 4);
+               } else {
+                       pr_debug("%s @ %p (con %#010x => %#010x)\n", bank->name,
+                                reg, readl(reg + offs[PINCFG_TYPE_FUNC]),
+                                bank->pm_save[PINCFG_TYPE_FUNC]);
+               }
+               for (type = 0; type < PINCFG_TYPE_NUM; type++)
+                       if (widths[type])
+                               writel(bank->pm_save[type], reg + offs[type]);
+       }
+}
+
+/**
+ * samsung_pinctrl_suspend - save pinctrl state for suspend
+ *
+ * Save data for all banks across all devices.
+ */
+static int samsung_pinctrl_suspend(void)
+{
+       struct samsung_pinctrl_drv_data *drvdata;
+
+       list_for_each_entry(drvdata, &drvdata_list, node) {
+               samsung_pinctrl_suspend_dev(drvdata);
+       }
+
+       return 0;
+}
+
+/**
+ * samsung_pinctrl_resume - restore pinctrl state for suspend
+ *
+ * Restore data for all banks across all devices.
+ */
+static void samsung_pinctrl_resume(void)
+{
+       struct samsung_pinctrl_drv_data *drvdata;
+
+       list_for_each_entry_reverse(drvdata, &drvdata_list, node) {
+               samsung_pinctrl_resume_dev(drvdata);
+       }
+}
+
+#else
+#define samsung_pinctrl_suspend                NULL
+#define samsung_pinctrl_resume         NULL
+#endif
+
+static struct syscore_ops samsung_pinctrl_syscore_ops = {
+       .suspend        = samsung_pinctrl_suspend,
+       .resume         = samsung_pinctrl_resume,
+};
+
 static const struct of_device_id samsung_pinctrl_dt_match[] = {
 #ifdef CONFIG_PINCTRL_EXYNOS
        { .compatible = "samsung,exynos4210-pinctrl",
@@ -992,6 +1133,14 @@ static struct platform_driver samsung_pinctrl_driver = {
 
 static int __init samsung_pinctrl_drv_register(void)
 {
+       /*
+        * Register syscore ops for save/restore of registers across suspend.
+        * It's important to ensure that this driver is running at an earlier
+        * initcall level than any arch-specific init calls that install syscore
+        * ops that turn off pad retention (like exynos_pm_resume).
+        */
+       register_syscore_ops(&samsung_pinctrl_syscore_ops);
+
        return platform_driver_register(&samsung_pinctrl_driver);
 }
 postcore_initcall(samsung_pinctrl_drv_register);
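The registration above relies only on initcall ordering: syscore suspend callbacks run in reverse registration order and resume callbacks in registration order, after the regular device PM callbacks, so registering at postcore_initcall keeps this driver's restore ahead of arch-level syscore ops registered at later initcall levels. A minimal standalone sketch of the same pattern; the names are illustrative and not part of this patch:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_syscore_suspend(void)
{
	/* runs late in suspend, with one CPU online and interrupts disabled */
	return 0;
}

static void example_syscore_resume(void)
{
	/* runs early in resume, before regular device resume callbacks */
}

static struct syscore_ops example_syscore_ops = {
	.suspend	= example_syscore_suspend,
	.resume		= example_syscore_resume,
};

static int __init example_register(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
postcore_initcall(example_register);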
index 7c7f9ebcd05b13d183889f6bb360f080ccc7d8ca..26d3519240c9c7f93bcb3628abf7feb6f6707535 100644 (file)
@@ -127,6 +127,7 @@ struct samsung_pin_bank_type {
  * @gpio_chip: GPIO chip of the bank.
  * @grange: linux gpio pin range supported by this bank.
  * @slock: spinlock protecting bank registers
+ * @pm_save: saved register values during suspend
  */
 struct samsung_pin_bank {
        struct samsung_pin_bank_type *type;
@@ -138,12 +139,15 @@ struct samsung_pin_bank {
        u32             eint_mask;
        u32             eint_offset;
        char            *name;
+       void            *soc_priv;
        struct device_node *of_node;
        struct samsung_pinctrl_drv_data *drvdata;
        struct irq_domain *irq_domain;
        struct gpio_chip gpio_chip;
        struct pinctrl_gpio_range grange;
        spinlock_t slock;
+
+       u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers */
 };
 
 /**
@@ -184,11 +188,15 @@ struct samsung_pin_ctrl {
 
        int             (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
        int             (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
+       void            (*suspend)(struct samsung_pinctrl_drv_data *);
+       void            (*resume)(struct samsung_pinctrl_drv_data *);
+
        char            *label;
 };
 
 /**
  * struct samsung_pinctrl_drv_data: wrapper for holding driver data together.
+ * @node: global list node
  * @virt_base: register base address of the controller.
  * @dev: device instance representing the controller.
 * @irq: interrupt number used by the controller to notify gpio interrupts.
@@ -201,6 +209,7 @@ struct samsung_pin_ctrl {
  * @nr_function: number of such pin functions.
  */
 struct samsung_pinctrl_drv_data {
+       struct list_head                node;
        void __iomem                    *virt_base;
        struct device                   *dev;
        int                             irq;
index 5f2d2bfd356e92c6bfac0ff54dde45fb472f7fbe..b9fa046186011bb0e3c781bbc6b0b7787b272c97 100644 (file)
@@ -1166,7 +1166,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
        (*map)->data.mux.function = np->name;
 
        if (pcs->is_pinconf) {
-               if (pcs_parse_pinconf(pcs, np, function, map))
+               res = pcs_parse_pinconf(pcs, np, function, map);
+               if (res)
                        goto free_pingroups;
                *num_maps = 2;
        } else {
index c52fc2c087327c6239c867bec7553a4eadd99d0c..b7d8c890514c7429c7533ce134f09cbbd440d333 100644 (file)
@@ -1990,8 +1990,10 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
        }
 
        clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
                goto gpiochip_error;
+       }
 
        clk_prepare_enable(clk);
 
@@ -2000,7 +2002,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
        return 0;
 
 gpiochip_error:
-       ret = gpiochip_remove(pctl->chip);
+       if (gpiochip_remove(pctl->chip))
+               dev_err(&pdev->dev, "failed to remove gpio chip\n");
 pinctrl_error:
        pinctrl_unregister(pctl->pctl_dev);
        return ret;
index f2977cff8366f4b89f88987f1080039a39f2f110..e92132c76a6b56cf02415f18b890c259f377c404 100644 (file)
@@ -716,10 +716,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
 
        /* get and remap our register range */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get resource\n");
-               return -ENOENT;
-       }
        xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xway_info.membase[0]))
                return PTR_ERR(xway_info.membase[0]);
index 791a6719d8a9d0b551b70e3532433efc3c60e97f..8cd90e7e945ae2317c73e42d6aa48bb6ffa4a160 100644 (file)
@@ -2357,27 +2357,48 @@ static const unsigned int sdhi3_wp_mux[] = {
 };
 /* - USB0 ------------------------------------------------------------------- */
 static const unsigned int usb0_pins[] = {
-       /* OVC */
-       150, 154,
+       /* PENC */
+       154,
 };
 static const unsigned int usb0_mux[] = {
-       USB_OVC0_MARK, USB_PENC0_MARK,
+       USB_PENC0_MARK,
+};
+static const unsigned int usb0_ovc_pins[] = {
+       /* USB_OVC */
+       150
+};
+static const unsigned int usb0_ovc_mux[] = {
+       USB_OVC0_MARK,
 };
 /* - USB1 ------------------------------------------------------------------- */
 static const unsigned int usb1_pins[] = {
-       /* OVC */
-       152, 155,
+       /* PENC */
+       155,
 };
 static const unsigned int usb1_mux[] = {
-       USB_OVC1_MARK, USB_PENC1_MARK,
+       USB_PENC1_MARK,
+};
+static const unsigned int usb1_ovc_pins[] = {
+       /* USB_OVC */
+       152,
+};
+static const unsigned int usb1_ovc_mux[] = {
+       USB_OVC1_MARK,
 };
 /* - USB2 ------------------------------------------------------------------- */
 static const unsigned int usb2_pins[] = {
-       /* OVC, PENC */
-       125, 156,
+       /* PENC */
+       156,
 };
 static const unsigned int usb2_mux[] = {
-       USB_OVC2_MARK, USB_PENC2_MARK,
+       USB_PENC2_MARK,
+};
+static const unsigned int usb2_ovc_pins[] = {
+       /* USB_OVC */
+       125,
+};
+static const unsigned int usb2_ovc_mux[] = {
+       USB_OVC2_MARK,
 };
 
 static const struct sh_pfc_pin_group pinmux_groups[] = {
@@ -2501,8 +2522,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(sdhi3_cd),
        SH_PFC_PIN_GROUP(sdhi3_wp),
        SH_PFC_PIN_GROUP(usb0),
+       SH_PFC_PIN_GROUP(usb0_ovc),
        SH_PFC_PIN_GROUP(usb1),
+       SH_PFC_PIN_GROUP(usb1_ovc),
        SH_PFC_PIN_GROUP(usb2),
+       SH_PFC_PIN_GROUP(usb2_ovc),
 };
 
 static const char * const du0_groups[] = {
@@ -2683,14 +2707,17 @@ static const char * const sdhi3_groups[] = {
 
 static const char * const usb0_groups[] = {
        "usb0",
+       "usb0_ovc",
 };
 
 static const char * const usb1_groups[] = {
        "usb1",
+       "usb1_ovc",
 };
 
 static const char * const usb2_groups[] = {
        "usb2",
+       "usb2_ovc",
 };
 
 static const struct sh_pfc_function pinmux_functions[] = {
index b964cc5505681c9a590de10852e6187a2b558d79..de43262398db483cfd9b21cc3cabb22bc4903011 100644 (file)
@@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = {
 #define WMT_PIN_EXTGPIO6       WMT_PIN(0, 6)
 #define WMT_PIN_EXTGPIO7       WMT_PIN(0, 7)
 #define WMT_PIN_WAKEUP0                WMT_PIN(0, 16)
-#define WMT_PIN_WAKEUP1                WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1                WMT_PIN(0, 17)
 #define WMT_PIN_SD0CD          WMT_PIN(0, 28)
 #define WMT_PIN_VDOUT0         WMT_PIN(1, 0)
 #define WMT_PIN_VDOUT1         WMT_PIN(1, 1)
index ab63104e8dc98eb148378b859b6217c93170c304..70d986e04afb205d6b908fb95d25b87b0d0c6804 100644 (file)
@@ -609,8 +609,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
        return 0;
 
 fail_range:
-       err = gpiochip_remove(&data->gpio_chip);
-       if (err)
+       if (gpiochip_remove(&data->gpio_chip))
                dev_err(&pdev->dev, "failed to remove gpio chip\n");
 fail_gpio:
        pinctrl_unregister(data->pctl_dev);
index 8df0c5a21be27d7a2044b06b8ee21bc8fab90359..d111c8687f9bd2d370b52127074c3b7dabb7058e 100644 (file)
@@ -703,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
                }
                rfkill_init_sw_state(gps_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_GPS));
-               rfkill_set_hw_state(bluetooth_rfkill,
+               rfkill_set_hw_state(gps_rfkill,
                                    hp_wmi_get_hw_state(HPWMI_GPS));
                err = rfkill_register(gps_rfkill);
                if (err)
index 0d0b5d7d19d02f9e2c0a245de4da43f084e3340e..7b8979c63f4882e6c3e5375b9f9c35c40e5a829c 100644 (file)
@@ -152,6 +152,7 @@ config BATTERY_SBS
 
 config BATTERY_BQ27x00
        tristate "BQ27x00 battery driver"
+       depends on I2C || I2C=n
        help
          Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
 
@@ -284,6 +285,7 @@ config CHARGER_LP8788
        tristate "TI LP8788 charger driver"
        depends on MFD_LP8788
        depends on LP8788_ADC
+       depends on IIO
        help
          Say Y to enable support for the LP8788 linear charger.
 
index a44175139bbf3a619d65fe611cc9fa961388cd76..fef56e2041b325cf9e086d434f1755fbca0a8857 100644 (file)
@@ -1269,5 +1269,5 @@ module_exit(pm2xxx_charger_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
-MODULE_ALIAS("platform:pm2xxx-charger");
+MODULE_ALIAS("i2c:pm2xxx-charger");
 MODULE_DESCRIPTION("PM2xxx charger management driver");
index 58cbb009b74f144564b069bf9344e801a5f02743..56fb509f4be00834f964fa2636b35c97eebf6849 100644 (file)
@@ -207,7 +207,6 @@ static int wm831x_backup_remove(struct platform_device *pdev)
        struct wm831x_backup *devdata = platform_get_drvdata(pdev);
 
        power_supply_unregister(&devdata->backup);
-       kfree(devdata->backup.name);
 
        return 0;
 }
index bea94510ad2d4c0d3faf7cc703c9b8883ec9cf97..71a2559278d7a0d41bf773edcf5219f35462ddb1 100644 (file)
@@ -628,9 +628,10 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        chip->caps = ptp_pch_caps;
        chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
-
-       if (IS_ERR(chip->ptp_clock))
-               return PTR_ERR(chip->ptp_clock);
+       if (IS_ERR(chip->ptp_clock)) {
+               ret = PTR_ERR(chip->ptp_clock);
+               goto err_ptp_clock_reg;
+       }
 
        spin_lock_init(&chip->register_lock);
 
@@ -669,6 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 err_req_irq:
        ptp_clock_unregister(chip->ptp_clock);
+err_ptp_clock_reg:
        iounmap(chip->regs);
        chip->regs = NULL;
 
index ec287989eafc56b89d509e48b3f8ad8999fdc0a7..c938bae18812ea5768e77b48ee2b0f1a6e39b24f 100644 (file)
@@ -265,11 +265,6 @@ static int imx_pwm_probe(struct platform_device *pdev)
        imx->chip.npwm = 1;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(imx->mmio_base))
                return PTR_ERR(imx->mmio_base);
index d1eb499fb15d2a3bbf3fec0198a3de7c9451e43b..ed6007b27585a9512d5c2470248343b3e80882d4 100644 (file)
@@ -117,11 +117,6 @@ static int pwm_probe(struct platform_device *pdev)
                return PTR_ERR(puv3->clk);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        puv3->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(puv3->base))
                return PTR_ERR(puv3->base);
index dee6ab552a0a9439c968c695574e14e001a67339..dc9717551d395be96800008bdee1f2379b04759a 100644 (file)
@@ -147,11 +147,6 @@ static int pwm_probe(struct platform_device *pdev)
        pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pwm->mmio_base))
                return PTR_ERR(pwm->mmio_base);
index 3d75f4a88f982395a148aca6906dd1fbf27c87cf..a5402933001f9cf446192d79b1ddf204e748e2a1 100644 (file)
@@ -181,11 +181,6 @@ static int tegra_pwm_probe(struct platform_device *pdev)
        pwm->dev = &pdev->dev;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resources defined\n");
-               return -ENODEV;
-       }
-
        pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pwm->mmio_base))
                return PTR_ERR(pwm->mmio_base);
index 0d65fb2e02c7897348862bebe2ac27be8449c432..72ca42dfa733312e408f155db136d81eb1822ace 100644 (file)
@@ -240,11 +240,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
        pc->chip.npwm = 1;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pc->mmio_base))
                return PTR_ERR(pc->mmio_base);
index 6a217596942f0d6badeeff620b50d182459554a5..48a485c2e4224a334b49dda3f1a8848aaecb2bfa 100644 (file)
@@ -471,11 +471,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
        pc->chip.npwm = NUM_PWM_CHANNEL;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pc->mmio_base))
                return PTR_ERR(pc->mmio_base);
index c9c3d3a1e0eb2693ac9b1fad89f620c077323bca..3b119bc2c3c606f3c9ca697d9fa1dc5e66144e59 100644 (file)
@@ -70,11 +70,6 @@ static int pwmss_probe(struct platform_device *pdev)
        mutex_init(&info->pwmss_lock);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(info->mmio_base))
                return PTR_ERR(info->mmio_base);
index 69effd19afc700144ad35ab4fce44ccf991f8dae..323125abf3f4ddae2d6f2c48abe956c8b497b40e 100644 (file)
@@ -230,11 +230,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
        }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        chip->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(chip->base))
                return PTR_ERR(chip->base);
index 6194d35ebb9740c0af7f999dcb7b73aec75d91ca..5ab056494bbefc63d3e3a0a3daffcedd7c0a9ef4 100644 (file)
@@ -47,4 +47,24 @@ config RAPIDIO_DEBUG
 
          If you are unsure about this, say N here.
 
+choice
+       prompt "Enumeration method"
+       depends on RAPIDIO
+       default RAPIDIO_ENUM_BASIC
+       help
+         There are different enumeration and discovery mechanisms offered
+         for the RapidIO subsystem. You may select a single built-in method
+         or any number of methods to be built as modules.
+         Selecting a built-in method disables use of loadable methods.
+
+         If unsure, select Basic built-in.
+
+config RAPIDIO_ENUM_BASIC
+       tristate "Basic"
+       help
+         This option includes a basic RapidIO fabric enumeration and discovery
+         mechanism similar to the one described in RapidIO specification Annex 1.
+
+endchoice
+
 source "drivers/rapidio/switches/Kconfig"
index ec3fb81210041e532206faac578cc9c897edaf82..3036702ffe8b5950e83c9979c6e596fb9aa57db3 100644 (file)
@@ -1,7 +1,8 @@
 #
 # Makefile for RapidIO interconnect services
 #
-obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
+obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o
+obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
 
 obj-$(CONFIG_RAPIDIO)          += switches/
 obj-$(CONFIG_RAPIDIO)          += devices/
index 6faba406b6e9f705bb047232b796d8476e89e1db..a8b2c23a7ef4b7acbe4a771fbf72a2ddffded52f 100644 (file)
@@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
        u32 intval;
        u32 ch_inte;
 
+       /* For MSI mode disable all device-level interrupts */
+       if (priv->flags & TSI721_USING_MSI)
+               iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
        dev_int = ioread32(priv->regs + TSI721_DEV_INT);
        if (!dev_int)
                return IRQ_NONE;
@@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
                }
        }
 #endif
+
+       /* For MSI mode re-enable device-level interrupts */
+       if (priv->flags & TSI721_USING_MSI) {
+               dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+                       TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
+               iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+       }
+
        return IRQ_HANDLED;
 }
 
index 0f4a53bdaa3cf9b8e60c4c8dfbf5d6f80b3cf882..a0c875563d7669fbf69916c95bd0b0219417d985 100644 (file)
@@ -164,6 +164,13 @@ void rio_unregister_driver(struct rio_driver *rdrv)
        driver_unregister(&rdrv->driver);
 }
 
+void rio_attach_device(struct rio_dev *rdev)
+{
+       rdev->dev.bus = &rio_bus_type;
+       rdev->dev.parent = &rio_bus;
+}
+EXPORT_SYMBOL_GPL(rio_attach_device);
+
 /**
  *  rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
  *  @dev: the standard device structure to match against
@@ -200,6 +207,7 @@ struct bus_type rio_bus_type = {
        .name = "rapidio",
        .match = rio_match_bus,
        .dev_attrs = rio_dev_attrs,
+       .bus_attrs = rio_bus_attrs,
        .probe = rio_device_probe,
        .remove = rio_device_remove,
 };
index a965acd3c0e4e9f6903f95c4fdb7d8f6331669d3..4c15dbf810871e04f55bb28d30d0f7e2d1cc826d 100644 (file)
 
 #include "rio.h"
 
-LIST_HEAD(rio_devices);
-
 static void rio_init_em(struct rio_dev *rdev);
 
-DEFINE_SPINLOCK(rio_global_list_lock);
-
 static int next_destid = 0;
 static int next_comptag = 1;
 
@@ -326,127 +322,6 @@ static int rio_is_switch(struct rio_dev *rdev)
        return 0;
 }
 
-/**
- * rio_switch_init - Sets switch operations for a particular vendor switch
- * @rdev: RIO device
- * @do_enum: Enumeration/Discovery mode flag
- *
- * Searches the RIO switch ops table for known switch types. If the vid
- * and did match a switch table entry, then call switch initialization
- * routine to setup switch-specific routines.
- */
-static void rio_switch_init(struct rio_dev *rdev, int do_enum)
-{
-       struct rio_switch_ops *cur = __start_rio_switch_ops;
-       struct rio_switch_ops *end = __end_rio_switch_ops;
-
-       while (cur < end) {
-               if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
-                       pr_debug("RIO: calling init routine for %s\n",
-                                rio_name(rdev));
-                       cur->init_hook(rdev, do_enum);
-                       break;
-               }
-               cur++;
-       }
-
-       if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
-               pr_debug("RIO: adding STD routing ops for %s\n",
-                       rio_name(rdev));
-               rdev->rswitch->add_entry = rio_std_route_add_entry;
-               rdev->rswitch->get_entry = rio_std_route_get_entry;
-               rdev->rswitch->clr_table = rio_std_route_clr_table;
-       }
-
-       if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
-               printk(KERN_ERR "RIO: missing routing ops for %s\n",
-                      rio_name(rdev));
-}
-
-/**
- * rio_add_device- Adds a RIO device to the device model
- * @rdev: RIO device
- *
- * Adds the RIO device to the global device list and adds the RIO
- * device to the RIO device list.  Creates the generic sysfs nodes
- * for an RIO device.
- */
-static int rio_add_device(struct rio_dev *rdev)
-{
-       int err;
-
-       err = device_add(&rdev->dev);
-       if (err)
-               return err;
-
-       spin_lock(&rio_global_list_lock);
-       list_add_tail(&rdev->global_list, &rio_devices);
-       spin_unlock(&rio_global_list_lock);
-
-       rio_create_sysfs_dev_files(rdev);
-
-       return 0;
-}
-
-/**
- * rio_enable_rx_tx_port - enable input receiver and output transmitter of
- * given port
- * @port: Master port associated with the RIO network
- * @local: local=1 select local port otherwise a far device is reached
- * @destid: Destination ID of the device to check host bit
- * @hopcount: Number of hops to reach the target
- * @port_num: Port (-number on switch) to enable on a far end device
- *
- * Returns 0 or 1 from on General Control Command and Status Register
- * (EXT_PTR+0x3C)
- */
-inline int rio_enable_rx_tx_port(struct rio_mport *port,
-                                int local, u16 destid,
-                                u8 hopcount, u8 port_num) {
-#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
-       u32 regval;
-       u32 ext_ftr_ptr;
-
-       /*
-       * enable rx input tx output port
-       */
-       pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
-                "%d, port_num = %d)\n", local, destid, hopcount, port_num);
-
-       ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
-
-       if (local) {
-               rio_local_read_config_32(port, ext_ftr_ptr +
-                               RIO_PORT_N_CTL_CSR(0),
-                               &regval);
-       } else {
-               if (rio_mport_read_config_32(port, destid, hopcount,
-               ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
-                       return -EIO;
-       }
-
-       if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
-               /* serial */
-               regval = regval | RIO_PORT_N_CTL_EN_RX_SER
-                               | RIO_PORT_N_CTL_EN_TX_SER;
-       } else {
-               /* parallel */
-               regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
-                               | RIO_PORT_N_CTL_EN_TX_PAR;
-       }
-
-       if (local) {
-               rio_local_write_config_32(port, ext_ftr_ptr +
-                                         RIO_PORT_N_CTL_CSR(0), regval);
-       } else {
-               if (rio_mport_write_config_32(port, destid, hopcount,
-                   ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
-                       return -EIO;
-       }
-#endif
-       return 0;
-}
-
 /**
  * rio_setup_device- Allocates and sets up a RIO device
  * @net: RIO network
@@ -587,8 +462,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
                             rdev->destid);
        }
 
-       rdev->dev.bus = &rio_bus_type;
-       rdev->dev.parent = &rio_bus;
+       rio_attach_device(rdev);
 
        device_initialize(&rdev->dev);
        rdev->dev.release = rio_release_dev;
@@ -1260,19 +1134,30 @@ static void rio_pw_enable(struct rio_mport *port, int enable)
 /**
  * rio_enum_mport- Start enumeration through a master port
  * @mport: Master port to send transactions
+ * @flags: Enumeration control flags
  *
  * Starts the enumeration process. If somebody has enumerated our
  * master port device, then give up. If not and we have an active
  * link, then start recursive peer enumeration. Returns %0 if
  * enumeration succeeds or %-EBUSY if enumeration fails.
  */
-int rio_enum_mport(struct rio_mport *mport)
+int rio_enum_mport(struct rio_mport *mport, u32 flags)
 {
        struct rio_net *net = NULL;
        int rc = 0;
 
        printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,
               mport->name);
+
+       /*
+        * To avoid multiple start requests (repeat enumeration is not supported
+        * by this method), check whether enumeration/discovery has already been
+        * performed for this mport: if the mport was already added to a net's
+        * list of mports, exit with an error.
+        */
+       if (mport->nnode.next || mport->nnode.prev)
+               return -EBUSY;
+
        /* If somebody else enumerated our master port device, bail. */
        if (rio_enum_host(mport) < 0) {
                printk(KERN_INFO
@@ -1362,14 +1247,16 @@ static void rio_build_route_tables(struct rio_net *net)
 /**
  * rio_disc_mport- Start discovery through a master port
  * @mport: Master port to send transactions
+ * @flags: discovery control flags
  *
  * Starts the discovery process. If we have an active link,
- * then wait for the signal that enumeration is complete.
+ * then wait for the signal that enumeration is complete (if wait
+ * is allowed).
  * When enumeration completion is signaled, start recursive
  * peer discovery. Returns %0 if discovery succeeds or %-EBUSY
  * on failure.
  */
-int rio_disc_mport(struct rio_mport *mport)
+int rio_disc_mport(struct rio_mport *mport, u32 flags)
 {
        struct rio_net *net = NULL;
        unsigned long to_end;
@@ -1379,6 +1266,11 @@ int rio_disc_mport(struct rio_mport *mport)
 
        /* If master port has an active link, allocate net and discover peers */
        if (rio_mport_is_active(mport)) {
+               if (rio_enum_complete(mport))
+                       goto enum_done;
+               else if (flags & RIO_SCAN_ENUM_NO_WAIT)
+                       return -EAGAIN;
+
                pr_debug("RIO: wait for enumeration to complete...\n");
 
                to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ;
@@ -1421,3 +1313,41 @@ enum_done:
 bail:
        return -EBUSY;
 }
+
+static struct rio_scan rio_scan_ops = {
+       .enumerate = rio_enum_mport,
+       .discover = rio_disc_mport,
+};
+
+static bool scan;
+module_param(scan, bool, 0);
+MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery "
+                       "(default = 0)");
+
+/**
+ * rio_basic_attach:
+ *
+ * When this enumeration/discovery method is loaded as a module, this function
+ * registers its enumeration and discovery routines for all available
+ * RapidIO mport devices. The "scan" command line parameter controls the
+ * ability of the module to start RapidIO enumeration/discovery automatically.
+ *
+ * Returns 0 for success or -EIO if unable to register itself.
+ *
+ * This enumeration/discovery method cannot be unloaded and therefore does not
+ * provide a matching cleanup_module routine.
+ */
+
+static int __init rio_basic_attach(void)
+{
+       if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops))
+               return -EIO;
+       if (scan)
+               rio_init_mports();
+       return 0;
+}
+
+late_initcall(rio_basic_attach);
+
+MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery");
+MODULE_LICENSE("GPL");
index 4dbe360989be8b3ed1b156e7f61aeaafb5b68fff..66d4acd5e18fd8f230cbb732a12dce1b90f98978 100644 (file)
@@ -285,3 +285,48 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
                        rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
        }
 }
+
+static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
+                               size_t count)
+{
+       long val;
+       struct rio_mport *port = NULL;
+       int rc;
+
+       if (kstrtol(buf, 0, &val) < 0)
+               return -EINVAL;
+
+       if (val == RIO_MPORT_ANY) {
+               rc = rio_init_mports();
+               goto exit;
+       }
+
+       if (val < 0 || val >= RIO_MAX_MPORTS)
+               return -EINVAL;
+
+       port = rio_find_mport((int)val);
+
+       if (!port) {
+               pr_debug("RIO: %s: mport_%d not available\n",
+                        __func__, (int)val);
+               return -EINVAL;
+       }
+
+       if (!port->nscan)
+               return -EINVAL;
+
+       if (port->host_deviceid >= 0)
+               rc = port->nscan->enumerate(port, 0);
+       else
+               rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT);
+exit:
+       if (!rc)
+               rc = count;
+
+       return rc;
+}
+
+struct bus_attribute rio_bus_attrs[] = {
+       __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store),
+       __ATTR_NULL
+};
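The new bus attribute ends up at /sys/bus/rapidio/scan (the bus name comes from rio_bus_type earlier in this diff). A minimal userspace sketch, assuming that path and sufficient privileges; the "-1" value is assumed to match RIO_MPORT_ANY, consistent with the negative-value handling in bus_scan_store() above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "-1" (assumed RIO_MPORT_ANY) asks for enumeration/discovery on every
	 * registered mport via rio_init_mports(); writing a specific mport ID
	 * would scan only that port instead. */
	const char *val = "-1";
	int fd = open("/sys/bus/rapidio/scan", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}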
index d553b5d137224434bc2466582d5004644039b25b..cb1c08996fbb133a2a49b68635a563abf3a2742c 100644 (file)
 
 #include "rio.h"
 
+static LIST_HEAD(rio_devices);
+static DEFINE_SPINLOCK(rio_global_list_lock);
+
 static LIST_HEAD(rio_mports);
+static DEFINE_MUTEX(rio_mport_list_lock);
 static unsigned char next_portid;
 static DEFINE_SPINLOCK(rio_mmap_lock);
 
@@ -52,6 +56,32 @@ u16 rio_local_get_device_id(struct rio_mport *port)
        return (RIO_GET_DID(port->sys_size, result));
 }
 
+/**
+ * rio_add_device- Adds a RIO device to the device model
+ * @rdev: RIO device
+ *
+ * Adds the RIO device to the global device list and adds the RIO
+ * device to the RIO device list.  Creates the generic sysfs nodes
+ * for an RIO device.
+ */
+int rio_add_device(struct rio_dev *rdev)
+{
+       int err;
+
+       err = device_add(&rdev->dev);
+       if (err)
+               return err;
+
+       spin_lock(&rio_global_list_lock);
+       list_add_tail(&rdev->global_list, &rio_devices);
+       spin_unlock(&rio_global_list_lock);
+
+       rio_create_sysfs_dev_files(rdev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rio_add_device);
+
 /**
  * rio_request_inb_mbox - request inbound mailbox service
  * @mport: RIO master port from which to allocate the mailbox resource
@@ -489,6 +519,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
 
        return ext_ftr_ptr;
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_physefb);
 
 /**
  * rio_get_comptag - Begin or continue searching for a RIO device by component tag
@@ -521,6 +552,7 @@ exit:
        spin_unlock(&rio_global_list_lock);
        return rdev;
 }
+EXPORT_SYMBOL_GPL(rio_get_comptag);
 
 /**
  * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
@@ -545,6 +577,107 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
                                  regval);
        return 0;
 }
+EXPORT_SYMBOL_GPL(rio_set_port_lockout);
+
+/**
+ * rio_switch_init - Sets switch operations for a particular vendor switch
+ * @rdev: RIO device
+ * @do_enum: Enumeration/Discovery mode flag
+ *
+ * Searches the RIO switch ops table for known switch types. If the vid
+ * and did match a switch table entry, then call switch initialization
+ * routine to setup switch-specific routines.
+ */
+void rio_switch_init(struct rio_dev *rdev, int do_enum)
+{
+       struct rio_switch_ops *cur = __start_rio_switch_ops;
+       struct rio_switch_ops *end = __end_rio_switch_ops;
+
+       while (cur < end) {
+               if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
+                       pr_debug("RIO: calling init routine for %s\n",
+                                rio_name(rdev));
+                       cur->init_hook(rdev, do_enum);
+                       break;
+               }
+               cur++;
+       }
+
+       if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
+               pr_debug("RIO: adding STD routing ops for %s\n",
+                       rio_name(rdev));
+               rdev->rswitch->add_entry = rio_std_route_add_entry;
+               rdev->rswitch->get_entry = rio_std_route_get_entry;
+               rdev->rswitch->clr_table = rio_std_route_clr_table;
+       }
+
+       if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
+               printk(KERN_ERR "RIO: missing routing ops for %s\n",
+                      rio_name(rdev));
+}
+EXPORT_SYMBOL_GPL(rio_switch_init);
+
+/**
+ * rio_enable_rx_tx_port - enable input receiver and output transmitter of
+ * given port
+ * @port: Master port associated with the RIO network
+ * @local: local=1 select local port otherwise a far device is reached
+ * @destid: Destination ID of the device to check host bit
+ * @hopcount: Number of hops to reach the target
+ * @port_num: Port (-number on switch) to enable on a far end device
+ *
+ * Returns 0 or 1 based on the General Control Command and Status Register
+ * (EXT_PTR+0x3C)
+ */
+int rio_enable_rx_tx_port(struct rio_mport *port,
+                         int local, u16 destid,
+                         u8 hopcount, u8 port_num)
+{
+#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
+       u32 regval;
+       u32 ext_ftr_ptr;
+
+       /*
+       * enable rx input tx output port
+       */
+       pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
+                "%d, port_num = %d)\n", local, destid, hopcount, port_num);
+
+       ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+
+       if (local) {
+               rio_local_read_config_32(port, ext_ftr_ptr +
+                               RIO_PORT_N_CTL_CSR(0),
+                               &regval);
+       } else {
+               if (rio_mport_read_config_32(port, destid, hopcount,
+               ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+                       return -EIO;
+       }
+
+       if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
+               /* serial */
+               regval = regval | RIO_PORT_N_CTL_EN_RX_SER
+                               | RIO_PORT_N_CTL_EN_TX_SER;
+       } else {
+               /* parallel */
+               regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
+                               | RIO_PORT_N_CTL_EN_TX_PAR;
+       }
+
+       if (local) {
+               rio_local_write_config_32(port, ext_ftr_ptr +
+                                         RIO_PORT_N_CTL_CSR(0), regval);
+       } else {
+               if (rio_mport_write_config_32(port, destid, hopcount,
+                   ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+                       return -EIO;
+       }
+#endif
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port);
+
 
 /**
  * rio_chk_dev_route - Validate route to the specified device.
@@ -610,6 +743,7 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
 
 /**
  * rio_chk_dev_access - Validate access to the specified device.
@@ -941,6 +1075,7 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
                return RIO_GET_BLOCK_ID(reg_val);
        }
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_efb);
 
 /**
  * rio_mport_get_feature - query for devices' extended features
@@ -997,6 +1132,7 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_feature);
 
 /**
  * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
@@ -1246,6 +1382,95 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
 
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 
+/**
+ * rio_find_mport - find RIO mport by its ID
+ * @mport_id: number (ID) of mport device
+ *
+ * Given a RIO mport number, the desired mport is located
+ * in the global list of mports. If the mport is found, a pointer to its
+ * data structure is returned.  If no mport is found, %NULL is returned.
+ */
+struct rio_mport *rio_find_mport(int mport_id)
+{
+       struct rio_mport *port;
+
+       mutex_lock(&rio_mport_list_lock);
+       list_for_each_entry(port, &rio_mports, node) {
+               if (port->id == mport_id)
+                       goto found;
+       }
+       port = NULL;
+found:
+       mutex_unlock(&rio_mport_list_lock);
+
+       return port;
+}
+
+/**
+ * rio_register_scan - enumeration/discovery method registration interface
+ * @mport_id: mport device ID for which fabric scan routine has to be set
+ *            (RIO_MPORT_ANY = set for all available mports)
+ * @scan_ops: enumeration/discovery control structure
+ *
+ * Assigns an enumeration or discovery method to the specified mport device (or
+ * to all available mports if RIO_MPORT_ANY is specified).
+ * Returns an error if the mport already has an enumerator attached to it.
+ * In the RIO_MPORT_ANY case, mports that already have a valid scan routine are
+ * skipped, and an error is returned if no available mport could be found.
+ */
+int rio_register_scan(int mport_id, struct rio_scan *scan_ops)
+{
+       struct rio_mport *port;
+       int rc = -EBUSY;
+
+       mutex_lock(&rio_mport_list_lock);
+       list_for_each_entry(port, &rio_mports, node) {
+               if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
+                       if (port->nscan && mport_id == RIO_MPORT_ANY)
+                               continue;
+                       else if (port->nscan)
+                               break;
+
+                       port->nscan = scan_ops;
+                       rc = 0;
+
+                       if (mport_id != RIO_MPORT_ANY)
+                               break;
+               }
+       }
+       mutex_unlock(&rio_mport_list_lock);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(rio_register_scan);
+
+/**
+ * rio_unregister_scan - removes enumeration/discovery method from mport
+ * @mport_id: mport device ID for which fabric scan routine has to be
+ *            unregistered (RIO_MPORT_ANY = set for all available mports)
+ *
+ * Removes enumeration or discovery method assigned to the specified mport
+ * device (or all available mports if RIO_MPORT_ANY is specified).
+ */
+int rio_unregister_scan(int mport_id)
+{
+       struct rio_mport *port;
+
+       mutex_lock(&rio_mport_list_lock);
+       list_for_each_entry(port, &rio_mports, node) {
+               if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
+                       if (port->nscan)
+                               port->nscan = NULL;
+                       if (mport_id != RIO_MPORT_ANY)
+                               break;
+               }
+       }
+       mutex_unlock(&rio_mport_list_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rio_unregister_scan);
+
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
@@ -1274,7 +1499,7 @@ static void disc_work_handler(struct work_struct *_work)
        work = container_of(_work, struct rio_disc_work, work);
        pr_debug("RIO: discovery work for mport %d %s\n",
                 work->mport->id, work->mport->name);
-       rio_disc_mport(work->mport);
+       work->mport->nscan->discover(work->mport, 0);
 }
 
 int rio_init_mports(void)
@@ -1290,12 +1515,15 @@ int rio_init_mports(void)
         * First, run enumerations and check if we need to perform discovery
         * on any of the registered mports.
         */
+       mutex_lock(&rio_mport_list_lock);
        list_for_each_entry(port, &rio_mports, node) {
-               if (port->host_deviceid >= 0)
-                       rio_enum_mport(port);
-               else
+               if (port->host_deviceid >= 0) {
+                       if (port->nscan)
+                               port->nscan->enumerate(port, 0);
+               } else
                        n++;
        }
+       mutex_unlock(&rio_mport_list_lock);
 
        if (!n)
                goto no_disc;
@@ -1322,14 +1550,16 @@ int rio_init_mports(void)
        }
 
        n = 0;
+       mutex_lock(&rio_mport_list_lock);
        list_for_each_entry(port, &rio_mports, node) {
-               if (port->host_deviceid < 0) {
+               if (port->host_deviceid < 0 && port->nscan) {
                        work[n].mport = port;
                        INIT_WORK(&work[n].work, disc_work_handler);
                        queue_work(rio_wq, &work[n].work);
                        n++;
                }
        }
+       mutex_unlock(&rio_mport_list_lock);
 
        flush_workqueue(rio_wq);
        pr_debug("RIO: destroy discovery workqueue\n");
@@ -1342,8 +1572,6 @@ no_disc:
        return 0;
 }
 
-device_initcall_sync(rio_init_mports);
-
 static int hdids[RIO_MAX_MPORTS + 1];
 
 static int rio_get_hdid(int index)
@@ -1371,7 +1599,10 @@ int rio_register_mport(struct rio_mport *port)
 
        port->id = next_portid++;
        port->host_deviceid = rio_get_hdid(port->id);
+       port->nscan = NULL;
+       mutex_lock(&rio_mport_list_lock);
        list_add_tail(&port->node, &rio_mports);
+       mutex_unlock(&rio_mport_list_lock);
        return 0;
 }
 
@@ -1386,3 +1617,4 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
 EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
 EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
 EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
+EXPORT_SYMBOL_GPL(rio_init_mports);
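The rio_register_scan()/rio_unregister_scan() interface added above lets an enumeration module attach itself to one or to all master ports. A minimal sketch of such a module (illustrative only, not part of this patch; the my_* names are hypothetical and the rio_scan callback signatures are inferred from the nscan->enumerate()/nscan->discover() calls in rio_init_mports() and disc_work_handler() above):

	#include <linux/module.h>
	#include <linux/rio.h>
	#include "rio.h"	/* core-internal header: rio_register_scan(), RIO_MPORT_ANY */

	static int my_enumerate(struct rio_mport *mport, u32 flags)
	{
		/* actively enumerate the fabric behind this master port */
		return 0;
	}

	static int my_discover(struct rio_mport *mport, u32 flags)
	{
		/* passively discover devices already enumerated by a host */
		return 0;
	}

	static struct rio_scan my_scan_ops = {
		.enumerate = my_enumerate,
		.discover  = my_discover,
	};

	static int __init my_scan_init(void)
	{
		/* attach to every mport that does not yet have a scan routine */
		return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
	}

	static void __exit my_scan_exit(void)
	{
		rio_unregister_scan(RIO_MPORT_ANY);
	}

	module_init(my_scan_init);
	module_exit(my_scan_exit);
	MODULE_LICENSE("GPL");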
index b1af414f15e60f8e0cee81270321fa2d6ddf5c84..c14f864dea5cbf420fd247ec6d4584c89e932703 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rio.h>
 
 #define RIO_MAX_CHK_RETRY      3
+#define RIO_MPORT_ANY          (-1)
 
 /* Functions internal to the RIO core code */
 
@@ -27,8 +28,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
 extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
                                    u8 hopcount);
 extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
-extern int rio_enum_mport(struct rio_mport *mport);
-extern int rio_disc_mport(struct rio_mport *mport);
 extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
                                   u8 hopcount, u16 table, u16 route_destid,
                                   u8 route_port);
@@ -39,10 +38,18 @@ extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
                                   u8 hopcount, u16 table);
 extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
 extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
+extern int rio_add_device(struct rio_dev *rdev);
+extern void rio_switch_init(struct rio_dev *rdev, int do_enum);
+extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
+                                u8 hopcount, u8 port_num);
+extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
+extern int rio_unregister_scan(int mport_id);
+extern void rio_attach_device(struct rio_dev *rdev);
+extern struct rio_mport *rio_find_mport(int mport_id);
 
 /* Structures internal to the RIO core code */
 extern struct device_attribute rio_dev_attrs[];
-extern spinlock_t rio_global_list_lock;
+extern struct bus_attribute rio_bus_attrs[];
 
 extern struct rio_switch_ops __start_rio_switch_ops[];
 extern struct rio_switch_ops __end_rio_switch_ops[];
index 6e501784158266d990fc6cad8fb6e0ee7733d923..815d6df8bd5f70fab479247c81c6afe7fa746841 100644 (file)
@@ -1539,7 +1539,10 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
 }
 
 /**
- * Balance enable_count of each GPIO and actual GPIO pin control.
+ * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control
+ * @rdev: regulator_dev structure
+ * @enable: enable GPIO at initial use?
+ *
  * GPIO is enabled in case of initial use. (enable_count is 0)
  * GPIO is disabled when it is not shared any more. (enable_count <= 1)
  */
@@ -2702,7 +2705,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage);
 /**
  * regulator_set_current_limit - set regulator output current limit
  * @regulator: regulator source
- * @min_uA: Minimuum supported current in uA
+ * @min_uA: Minimum supported current in uA
  * @max_uA: Maximum supported current in uA
  *
  * Sets current sink to the desired output current. This can be set during
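For reference (illustrative only, not part of this change), a consumer uses the API documented above roughly as follows; the "vbus" supply name and the dev pointer are placeholders:

	struct regulator *reg;
	int ret;

	reg = regulator_get(dev, "vbus");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* constrain the output current to 500 mA .. 900 mA */
	ret = regulator_set_current_limit(reg, 500000, 900000);
	if (ret)
		dev_warn(dev, "could not set current limit: %d\n", ret);

	regulator_put(reg);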
index 89bd2faaef8cf627cddcd9f1b7a03c9b4cab5fae..ce89f7848a57f00d58c6f879cb2b3157d28fd303 100644 (file)
 static int power_state_active_cnt; /* will initialize to zero */
 static DEFINE_SPINLOCK(power_state_active_lock);
 
-int power_state_active_get(void)
-{
-       unsigned long flags;
-       int cnt;
-
-       spin_lock_irqsave(&power_state_active_lock, flags);
-       cnt = power_state_active_cnt;
-       spin_unlock_irqrestore(&power_state_active_lock, flags);
-
-       return cnt;
-}
-
 void power_state_active_enable(void)
 {
        unsigned long flags;
@@ -65,6 +53,18 @@ out:
 
 #ifdef CONFIG_REGULATOR_DEBUG
 
+static int power_state_active_get(void)
+{
+       unsigned long flags;
+       int cnt;
+
+       spin_lock_irqsave(&power_state_active_lock, flags);
+       cnt = power_state_active_cnt;
+       spin_unlock_irqrestore(&power_state_active_lock, flags);
+
+       return cnt;
+}
+
 static struct ux500_regulator_debug {
        struct dentry *dir;
        struct dentry *status_file;
index 92ceed0fc65e780216861143c8ccc25f9c16fa53..3ae44ac12a94c40a9f6fac05df05ad882b2be53b 100644 (file)
@@ -840,7 +840,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        break;
                }
 
-               if ((id == PALMAS_REG_SMPS6) && (id == PALMAS_REG_SMPS8))
+               if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8))
                        ramp_delay_support = true;
 
                if (ramp_delay_support) {
@@ -878,7 +878,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        pmic->desc[id].vsel_mask = SMPS10_VSEL;
                        pmic->desc[id].enable_reg =
                                        PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
-                                                       PALMAS_SMPS10_STATUS);
+                                                       PALMAS_SMPS10_CTRL);
                        pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
                        pmic->desc[id].min_uV = 3750000;
                        pmic->desc[id].uV_step = 1250000;
index 0c81915b19975278daa15f99dea370bced46333c..b9838130a7b0da42a3bc0f35724c9b53b553096f 100644 (file)
@@ -20,7 +20,6 @@ if RTC_CLASS
 config RTC_HCTOSYS
        bool "Set system time from RTC on startup and resume"
        default y
-       depends on !ALWAYS_USE_PERSISTENT_CLOCK
        help
          If you say yes here, the system time (wall clock) will be set using
          the value read from a specified RTC device. This is useful to avoid
@@ -29,7 +28,6 @@ config RTC_HCTOSYS
 config RTC_SYSTOHC
        bool "Set the RTC time based on NTP synchronization"
        default y
-       depends on !ALWAYS_USE_PERSISTENT_CLOCK
        help
          If you say yes here, the system time (wall clock) will be stored
          in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
index 0eab77b22340019693aa165c8987b65bd170977f..f296f3f7db9bb72574396182f3a8dfd115e16786 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 #include <linux/interrupt.h>
+#include <linux/spinlock.h>
 #include <linux/ioctl.h>
 #include <linux/completion.h>
 #include <linux/io.h>
 
 #define AT91_RTC_EPOCH         1900UL  /* just like arch/arm/common/rtctime.c */
 
+struct at91_rtc_config {
+       bool use_shadow_imr;
+};
+
+static const struct at91_rtc_config *at91_rtc_config;
 static DECLARE_COMPLETION(at91_rtc_updated);
 static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
 static void __iomem *at91_rtc_regs;
 static int irq;
+static DEFINE_SPINLOCK(at91_rtc_lock);
+static u32 at91_rtc_shadow_imr;
+
+static void at91_rtc_write_ier(u32 mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&at91_rtc_lock, flags);
+       at91_rtc_shadow_imr |= mask;
+       at91_rtc_write(AT91_RTC_IER, mask);
+       spin_unlock_irqrestore(&at91_rtc_lock, flags);
+}
+
+static void at91_rtc_write_idr(u32 mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&at91_rtc_lock, flags);
+       at91_rtc_write(AT91_RTC_IDR, mask);
+       /*
+        * A register read back (of any RTC register) is needed to make sure
+        * the IDR-register write has reached the peripheral before the
+        * shadow mask is updated.
+        *
+        * Note that there is still a possibility that the mask is updated
+        * before interrupts have actually been disabled in hardware. The only
+        * way to be certain would be to poll the IMR-register, which is
+        * the very register we are trying to emulate. The register read back
+        * is a reasonable heuristic.
+        */
+       at91_rtc_read(AT91_RTC_SR);
+       at91_rtc_shadow_imr &= ~mask;
+       spin_unlock_irqrestore(&at91_rtc_lock, flags);
+}
+
+static u32 at91_rtc_read_imr(void)
+{
+       unsigned long flags;
+       u32 mask;
+
+       if (at91_rtc_config->use_shadow_imr) {
+               spin_lock_irqsave(&at91_rtc_lock, flags);
+               mask = at91_rtc_shadow_imr;
+               spin_unlock_irqrestore(&at91_rtc_lock, flags);
+       } else {
+               mask = at91_rtc_read(AT91_RTC_IMR);
+       }
+
+       return mask;
+}
 
 /*
  * Decode time/date into rtc_time structure
@@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
        cr = at91_rtc_read(AT91_RTC_CR);
        at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
 
-       at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+       at91_rtc_write_ier(AT91_RTC_ACKUPD);
        wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
-       at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+       at91_rtc_write_idr(AT91_RTC_ACKUPD);
 
        at91_rtc_write(AT91_RTC_TIMR,
                          bin2bcd(tm->tm_sec) << 0
@@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
        tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
        tm->tm_year = at91_alarm_year - 1900;
 
-       alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+       alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
                        ? 1 : 0;
 
        dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        tm.tm_min = alrm->time.tm_min;
        tm.tm_sec = alrm->time.tm_sec;
 
-       at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+       at91_rtc_write_idr(AT91_RTC_ALARM);
        at91_rtc_write(AT91_RTC_TIMALR,
                  bin2bcd(tm.tm_sec) << 0
                | bin2bcd(tm.tm_min) << 8
@@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        if (alrm->enabled) {
                at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
-               at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+               at91_rtc_write_ier(AT91_RTC_ALARM);
        }
 
        dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
        if (enabled) {
                at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
-               at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+               at91_rtc_write_ier(AT91_RTC_ALARM);
        } else
-               at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+               at91_rtc_write_idr(AT91_RTC_ALARM);
 
        return 0;
 }
@@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
  */
 static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-       unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
+       unsigned long imr = at91_rtc_read_imr();
 
        seq_printf(seq, "update_IRQ\t: %s\n",
                        (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
        unsigned int rtsr;
        unsigned long events = 0;
 
-       rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+       rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
        if (rtsr) {             /* this interrupt is shared!  Is it ours? */
                if (rtsr & AT91_RTC_ALARM)
                        events |= (RTC_AF | RTC_IRQF);
@@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
        return IRQ_NONE;                /* not handled */
 }
 
+static const struct at91_rtc_config at91rm9200_config = {
+};
+
+static const struct at91_rtc_config at91sam9x5_config = {
+       .use_shadow_imr = true,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id at91_rtc_dt_ids[] = {
+       {
+               .compatible = "atmel,at91rm9200-rtc",
+               .data = &at91rm9200_config,
+       }, {
+               .compatible = "atmel,at91sam9x5-rtc",
+               .data = &at91sam9x5_config,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
+#endif
+
+static const struct at91_rtc_config *
+at91_rtc_get_config(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+
+       if (pdev->dev.of_node) {
+               match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
+               if (!match)
+                       return NULL;
+               return (const struct at91_rtc_config *)match->data;
+       }
+
+       return &at91rm9200_config;
+}
+
 static const struct rtc_class_ops at91_rtc_ops = {
        .read_time      = at91_rtc_readtime,
        .set_time       = at91_rtc_settime,
@@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
        struct resource *regs;
        int ret = 0;
 
+       at91_rtc_config = at91_rtc_get_config(pdev);
+       if (!at91_rtc_config)
+               return -ENODEV;
+
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs) {
                dev_err(&pdev->dev, "no mmio resource defined\n");
@@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
        at91_rtc_write(AT91_RTC_MR, 0);         /* 24 hour mode */
 
        /* Disable all interrupts */
-       at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+       at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
                                        AT91_RTC_SECEV | AT91_RTC_TIMEV |
                                        AT91_RTC_CALEV);
 
@@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
 
        /* Disable all interrupts */
-       at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+       at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
                                        AT91_RTC_SECEV | AT91_RTC_TIMEV |
                                        AT91_RTC_CALEV);
        free_irq(irq, pdev);
@@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev)
        /* this IRQ is shared with DBGU and other hardware which isn't
         * necessarily doing PM like we are...
         */
-       at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
+       at91_rtc_imr = at91_rtc_read_imr()
                        & (AT91_RTC_ALARM|AT91_RTC_SECEV);
        if (at91_rtc_imr) {
                if (device_may_wakeup(dev))
                        enable_irq_wake(irq);
                else
-                       at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
+                       at91_rtc_write_idr(at91_rtc_imr);
        }
        return 0;
 }
@@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev)
                if (device_may_wakeup(dev))
                        disable_irq_wake(irq);
                else
-                       at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+                       at91_rtc_write_ier(at91_rtc_imr);
        }
        return 0;
 }
@@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
 
-static const struct of_device_id at91_rtc_dt_ids[] = {
-       { .compatible = "atmel,at91rm9200-rtc" },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-
 static struct platform_driver at91_rtc_driver = {
        .remove         = __exit_p(at91_rtc_remove),
        .driver         = {
index cc5bea9c4b1c3778ff817b973b52f29c4bee64bf..f1cb706445c739d7eee047a06482e63f0444de83 100644 (file)
@@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev)
                }
 
                spin_lock_irq(&rtc_lock);
+               if (device_may_wakeup(dev))
+                       hpet_rtc_timer_init();
+
                do {
                        CMOS_WRITE(tmp, RTC_CONTROL);
                        hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
@@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev)
                        rtc_update_irq(cmos->rtc, 1, mask);
                        tmp &= ~RTC_AIE;
                        hpet_mask_rtc_irq_bit(RTC_AIE);
-                       hpet_rtc_timer_init();
                } while (mask & RTC_AIE);
                spin_unlock_irq(&rtc_lock);
        }
index 48b6612fae7fb59b13804972e2709884f79da92a..d5af7baa48b56c893e449b418d158f7b25448628 100644 (file)
@@ -285,7 +285,7 @@ static int max8998_rtc_probe(struct platform_device *pdev)
                        info->irq, ret);
 
        dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
-       if (pdata->rtc_delay) {
+       if (pdata && pdata->rtc_delay) {
                info->lp3974_bug_workaround = true;
                dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
                                " RTC updates will be extremely slow.\n");
index f5dfb6e5e7d9b927e69e52cd3bce2ae801c1171e..d592e2fe43f7a7fd29be1592080c03d854ed22d3 100644 (file)
@@ -234,11 +234,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "platform_get_resource failed\n");
-               return -ENXIO;
-       }
-
        nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(nuc900_rtc->rtc_reg))
                return PTR_ERR(nuc900_rtc->rtc_reg);
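This and the similar probe cleanups that follow (omap, s3c, tegra RTC, and later tegra20-sflash SPI) all rely on the same fact: devm_ioremap_resource() rejects a NULL resource itself, so the removed checks were redundant. The resulting pattern, condensed (illustrative only):

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);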
index 4e1bdb832e37cc9f50018343b22628ca8c364e83..b0ba3fc991ea2352de71d1839480f967ef8a52b0 100644 (file)
@@ -347,11 +347,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               pr_debug("%s: RTC resource data missing\n", pdev->name);
-               return -ENOENT;
-       }
-
        rtc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(rtc_base))
                return PTR_ERR(rtc_base);
index 8900ea784817a66d12300f844a162026580e42c0..0f0609b1aa2ccfead83ec98db660a58fac5736de 100644 (file)
@@ -306,7 +306,7 @@ static int pl031_remove(struct amba_device *adev)
        struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
 
        amba_set_drvdata(adev, NULL);
-       free_irq(adev->irq[0], ldata->rtc);
+       free_irq(adev->irq[0], ldata);
        rtc_device_unregister(ldata->rtc);
        iounmap(ldata->base);
        kfree(ldata);
index 14040b22888de32607eef344e31efe574cec5250..0b495e8b8e66958d0781086264b7c8653b3d0eed 100644 (file)
@@ -477,11 +477,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
        /* get the memory region */
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "failed to get memory region resource\n");
-               return -ENOENT;
-       }
-
        s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(s3c_rtc_base))
                return PTR_ERR(s3c_rtc_base);
index a34315d25478131c00ef1b308bb91f4152f25cae..76af92ad5a8ac418fa846e91935a0ae80fdbfc72 100644 (file)
@@ -322,12 +322,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev,
-                       "Unable to allocate resources for device.\n");
-               return -EBUSY;
-       }
-
        info->rtc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(info->rtc_base))
                return PTR_ERR(info->rtc_base);
index 459c2ffc95a6b3a9b59d6baf997949a0ac13ac0f..426901cef14fe3067fedb5eeb37be055315d2a51 100644 (file)
@@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
                return ret;
        }
 
+       device_init_wakeup(&pdev->dev, 1);
+
        platform_set_drvdata(pdev, rtc);
        rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
                                       &tps6586x_rtc_ops, THIS_MODULE);
@@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
                goto fail_rtc_register;
        }
        disable_irq(rtc->irq);
-       device_set_wakeup_capable(&pdev->dev, 1);
        return 0;
 
 fail_rtc_register:
index 8751a5240c99f775ba8cba71dd2754bd76720410..b2eab34f38d96b84e61c1fa0e72b5ad4e90e2897 100644 (file)
@@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
        }
 
        platform_set_drvdata(pdev, rtc);
+       device_init_wakeup(&pdev->dev, 1);
        return 0;
 
 out2:
index 4361d9772c42ad8482e3ddbab2c7edf36294b579..d72a9216ee2e970d73a4758fbf1ccd1ccc366066 100644 (file)
@@ -3440,8 +3440,16 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
                        device->path_data.opm &= ~eventlpm;
                        device->path_data.ppm &= ~eventlpm;
                        device->path_data.npm &= ~eventlpm;
-                       if (oldopm && !device->path_data.opm)
-                               dasd_generic_last_path_gone(device);
+                       if (oldopm && !device->path_data.opm) {
+                               dev_warn(&device->cdev->dev,
+                                        "No verified channel paths remain "
+                                        "for the device\n");
+                               DBF_DEV_EVENT(DBF_WARNING, device,
+                                             "%s", "last verified path gone");
+                               dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+                               dasd_device_set_stop_bits(device,
+                                                         DASD_STOPPED_DC_WAIT);
+                       }
                }
                if (path_event[chp] & PE_PATH_AVAILABLE) {
                        device->path_data.opm &= ~eventlpm;
index 690c3338a8ae5a6e4a9b9a5ee3fbedb0b1ec8c82..464dd29d06c07dcbeb1e6ee96a439dbb037a25a9 100644 (file)
@@ -343,6 +343,7 @@ static int __init xpram_setup_blkdev(void)
                        put_disk(xpram_disks[i]);
                        goto out;
                }
+               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
                blk_queue_make_request(xpram_queues[i], xpram_make_request);
                blk_queue_logical_block_size(xpram_queues[i], 4096);
        }
index 21fabc6d5a9c83bac80fda8d78495c26f9febe64..6c440d4349d4f69506fc2615074d6989d37a7c3b 100644 (file)
@@ -352,12 +352,48 @@ static ssize_t chp_shared_show(struct device *dev,
 
 static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
 
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct channel_path *chp = to_channelpath(dev);
+       ssize_t rc;
+
+       mutex_lock(&chp->lock);
+       if (chp->desc_fmt1.flags & 0x10)
+               rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+       else
+               rc = 0;
+       mutex_unlock(&chp->lock);
+
+       return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct channel_path *chp = to_channelpath(dev);
+       ssize_t rc;
+
+       mutex_lock(&chp->lock);
+       if (chp->desc_fmt1.flags & 0x10)
+               rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+       else
+               rc = 0;
+       mutex_unlock(&chp->lock);
+
+       return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
 static struct attribute *chp_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_configure.attr,
        &dev_attr_type.attr,
        &dev_attr_cmg.attr,
        &dev_attr_shared.attr,
+       &dev_attr_chid.attr,
+       &dev_attr_chid_external.attr,
        NULL,
 };
 static struct attribute_group chp_attr_group = {
index 349d5fc471963a20d7f1dd4fec393ef44877d6a6..e7ef2a683b8fbc617368ad855f4f18d6832b3eb6 100644 (file)
@@ -43,7 +43,9 @@ struct channel_path_desc_fmt1 {
        u8 chpid;
        u32:24;
        u8 chpp;
-       u32 unused[3];
+       u32 unused[2];
+       u16 chid;
+       u32:16;
        u16 mdc;
        u16:13;
        u8 r:1;
index 4ffa66c87ea509ebd914938364fb16eb205fde7b..279ad504ec3c85e3e410d231fac82e812a75b5d2 100644 (file)
@@ -130,26 +130,6 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
 /**
  * some more debug stuff
  */
-#define IUCV_HEXDUMP16(importance,header,ptr) \
-PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
-                  "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
-                  *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
-                  *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
-                  *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
-                  *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
-                  *(((char*)ptr)+12),*(((char*)ptr)+13), \
-                  *(((char*)ptr)+14),*(((char*)ptr)+15)); \
-PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
-                  "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
-                  *(((char*)ptr)+16),*(((char*)ptr)+17), \
-                  *(((char*)ptr)+18),*(((char*)ptr)+19), \
-                  *(((char*)ptr)+20),*(((char*)ptr)+21), \
-                  *(((char*)ptr)+22),*(((char*)ptr)+23), \
-                  *(((char*)ptr)+24),*(((char*)ptr)+25), \
-                  *(((char*)ptr)+26),*(((char*)ptr)+27), \
-                  *(((char*)ptr)+28),*(((char*)ptr)+29), \
-                  *(((char*)ptr)+30),*(((char*)ptr)+31));
-
 #define PRINTK_HEADER " iucv: "       /* for debugging */
 
 /* dummy device to make sure netiucv_pm functions are called */
@@ -2040,6 +2020,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
                           netiucv_setup_netdevice);
        if (!dev)
                return NULL;
+       rtnl_lock();
        if (dev_alloc_name(dev, dev->name) < 0)
                goto out_netdev;
 
@@ -2061,6 +2042,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 out_fsm:
        kfree_fsm(privptr->fsm);
 out_netdev:
+       rtnl_unlock();
        free_netdev(dev);
        return NULL;
 }
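A note on the locking change in this hunk and the next (illustrative, not part of the patch text): dev_alloc_name() and register_netdevice() must be called with the RTNL held, and register_netdev() is simply register_netdevice() wrapped in rtnl_lock()/rtnl_unlock(); once the driver takes the lock itself it has to use the unlocked variant, roughly:

	rtnl_lock();
	err = dev_alloc_name(dev, dev->name);
	if (err >= 0)
		err = register_netdevice(dev);
	rtnl_unlock();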
@@ -2100,6 +2082,7 @@ static ssize_t conn_write(struct device_driver *drv,
 
        rc = netiucv_register_device(dev);
        if (rc) {
+               rtnl_unlock();
                IUCV_DBF_TEXT_(setup, 2,
                        "ret %d from netiucv_register_device\n", rc);
                goto out_free_ndev;
@@ -2109,7 +2092,8 @@ static ssize_t conn_write(struct device_driver *drv,
        priv = netdev_priv(dev);
        SET_NETDEV_DEV(dev, priv->dev);
 
-       rc = register_netdev(dev);
+       rc = register_netdevice(dev);
+       rtnl_unlock();
        if (rc)
                goto out_unreg;
 
index c4f392d5db4cd595dfea3211fbcc8f99a81f48c2..41ef94320ee85d2e256b1bd10598f2200ec9ef16 100644 (file)
@@ -738,7 +738,7 @@ struct qeth_rx {
        int qdio_err;
 };
 
-#define QETH_NAPI_WEIGHT 128
+#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
 
 struct qeth_card {
        struct list_head list;
index 6cd0fc1b203a2c6e8147dc37d980933ff9b7e89f..e4ca70475190ceb4be5e75ccd6df014232428d3d 100644 (file)
@@ -1282,8 +1282,10 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
 
        qeth_free_cq(card);
        cancel_delayed_work_sync(&card->buffer_reclaim_work);
-       for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-               dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+       for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+               if (card->qdio.in_q->bufs[j].rx_skb)
+                       dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+       }
        kfree(card->qdio.in_q);
        card->qdio.in_q = NULL;
        /* inbound buffer pool */
@@ -1729,14 +1731,14 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
        QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
 
        if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
-           (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
-               card->info.blkt.time_total = 250;
-               card->info.blkt.inter_packet = 5;
-               card->info.blkt.inter_packet_jumbo = 15;
-       } else {
+           prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
                card->info.blkt.time_total = 0;
                card->info.blkt.inter_packet = 0;
                card->info.blkt.inter_packet_jumbo = 0;
+       } else {
+               card->info.blkt.time_total = 250;
+               card->info.blkt.inter_packet = 5;
+               card->info.blkt.inter_packet_jumbo = 15;
        }
 }
 
@@ -2198,11 +2200,11 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
                case QETH_LINK_TYPE_LANE_TR:
                        return 2000;
                default:
-                       return 1492;
+                       return card->options.layer2 ? 1500 : 1492;
                }
        case QETH_CARD_TYPE_OSM:
        case QETH_CARD_TYPE_OSX:
-               return 1492;
+               return card->options.layer2 ? 1500 : 1492;
        default:
                return 1500;
        }
@@ -2275,9 +2277,10 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
                card->info.max_mtu = mtu;
                card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
        } else {
-               card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
                card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
                        iob->data);
+               card->info.initial_mtu = min(card->info.max_mtu,
+                                       qeth_get_initial_mtu_for_card(card));
                card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
        }
 
index 439c012be763646c48be2910d67a3acce50c3d48..b63d534192e33a8defea70b6c0195293392ea98e 100644 (file)
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
                file->f_pos += offset;
                break;
        case 2:
-               file->f_pos = debug->buffer_len - offset;
+               file->f_pos = debug->buffer_len + offset;
                break;
        default:
                return -EINVAL;
index 3fecf35ba2926f6e27d29ae0463d9b97755c3a92..9138d4edfa5c04a42e9e547025d746952332202a 100644 (file)
@@ -358,7 +358,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
-       if (skb->tail != skb->transport_header)
+       if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
 }
index 292b24f9bf935f119ab12dbbca96d5a00d16783d..ee721b6cbcdf891c4d08250e68ccfb9e82c6ceb2 100644 (file)
@@ -1975,7 +1975,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 {
        struct fcoe_ctlr_device *cdev;
        struct fc_lport *lport = NULL;
-       struct net_device *netdev = ptr;
+       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
        struct fcoe_port *port;
index f3a5a53e863133203044814f892286704cf70630..01adbe0ec53b89af776957d1b4cc58038aae818a 100644 (file)
@@ -704,7 +704,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
 static int libfcoe_device_notification(struct notifier_block *notifier,
                                    ulong event, void *ptr)
 {
-       struct net_device *netdev = ptr;
+       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
 
        switch (event) {
        case NETDEV_UNREGISTER:
index adc1f7f471f554dd3bc48825ab45bd65df6ed3af..85e1ffd0e5c5af17e53305ad016f28898d65c8f7 100644 (file)
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
                pos = file->f_pos + offset;
                break;
        case 2:
-               pos = fnic_dbg_prt->buffer_len - offset;
+               pos = fnic_dbg_prt->buffer_len + offset;
        }
        return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
                          -EINVAL : (file->f_pos = pos);
index 552e8a2b6f5f35da246bd06f44fb55fbda5f6ce4..448eae850b9c9c3d12ba2302ac2a18c843c70f87 100644 (file)
@@ -906,7 +906,6 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
                        ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
                                      consumed);
                        *status = ISCSI_TCP_SKB_DONE;
-                       skb_abort_seq_read(&seq);
                        goto skb_done;
                }
                BUG_ON(segment->copied >= segment->size);
index f63f5ff7f27467f7b34f80d3bc0089538dc147df..f525ecb7a9c6e187bf1097a6baf7aabad96377a4 100644 (file)
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
                pos = file->f_pos + off;
                break;
        case 2:
-               pos = debug->len - off;
+               pos = debug->len + off;
        }
        return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
 }
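The three *_lseek fixes in this series (bfad, fnic, lpfc) apply the usual whence semantics: for whence == 2 (SEEK_END) the offset is added to the buffer length, since it is normally zero or negative. A generic sketch, with buffer_len standing in for the per-driver length field:

	static loff_t example_lseek(struct file *file, loff_t off, int whence)
	{
		loff_t pos = -1;

		switch (whence) {
		case 0:		/* SEEK_SET */
			pos = off;
			break;
		case 1:		/* SEEK_CUR */
			pos = file->f_pos + off;
			break;
		case 2:		/* SEEK_END */
			pos = buffer_len + off;
			break;
		}
		return (pos < 0 || pos > buffer_len) ? -EINVAL : (file->f_pos = pos);
	}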
index d182c96e17ea48ef0427a67c461d8e9bc828b5c2..7a3870f385f63adcc7ed23db355af7fe0682fb93 100644 (file)
@@ -1370,7 +1370,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
                dump_stack();
                return;
        }
-       target_wait_for_sess_cmds(se_sess, 0);
+       target_wait_for_sess_cmds(se_sess);
 
        transport_deregister_session_configfs(sess->se_sess);
        transport_deregister_session(sess->se_sess);
index db66357211ed167d4df1019f151b7e99214788c8..86f0c5d5c116f74d2117806e45ec4599d750837a 100644 (file)
@@ -84,6 +84,7 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file)
 
 static const struct file_operations proc_scsi_fops = {
        .open = proc_scsi_host_open,
+       .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek,
        .write = proc_scsi_host_write
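For context (illustrative, not from the patch): a proc entry opened with single_open() needs single_release() in its file_operations to free the seq_file state, which is the hook being added above. The generic pairing looks like this (example_show is hypothetical):

	static int proc_example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations proc_example_fops = {
		.open    = proc_example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};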
index 787bd2c22bca44043b615d8b3b4d70663a3d36a9..380387a47b1d86fe4e6a8a7ef22a537bfb85517a 100644 (file)
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
        }
 
        if (xfer->tx_buf)
-               spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
+               if (xfer->bits_per_word > 8)
+                       spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+               else
+                       spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
        else
                spi_writel(as, TDR, 0);
 
        dev_dbg(master->dev.parent,
-               "  start pio xfer %p: len %u tx %p rx %p\n",
-               xfer, xfer->len, xfer->tx_buf, xfer->rx_buf);
+               "  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+               xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+               xfer->bits_per_word);
 
        /* Enable relevant interrupts */
        spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 {
        u8              *txp;
        u8              *rxp;
+       u16             *txp16;
+       u16             *rxp16;
        unsigned long   xfer_pos = xfer->len - as->current_remaining_bytes;
 
        if (xfer->rx_buf) {
-               rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
-               *rxp = spi_readl(as, RDR);
+               if (xfer->bits_per_word > 8) {
+                       rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+                       *rxp16 = spi_readl(as, RDR);
+               } else {
+                       rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+                       *rxp = spi_readl(as, RDR);
+               }
        } else {
                spi_readl(as, RDR);
        }
-
-       as->current_remaining_bytes--;
+       if (xfer->bits_per_word > 8) {
+               as->current_remaining_bytes -= 2;
+               if (as->current_remaining_bytes < 0)
+                       as->current_remaining_bytes = 0;
+       } else {
+               as->current_remaining_bytes--;
+       }
 
        if (as->current_remaining_bytes) {
                if (xfer->tx_buf) {
-                       txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
-                       spi_writel(as, TDR, *txp);
+                       if (xfer->bits_per_word > 8) {
+                               txp16 = (u16 *)(((u8 *)xfer->tx_buf)
+                                                       + xfer_pos + 2);
+                               spi_writel(as, TDR, *txp16);
+                       } else {
+                               txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
+                               spi_writel(as, TDR, *txp);
+                       }
                } else {
                        spi_writel(as, TDR, 0);
                }
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
                        }
                }
 
+               if (xfer->bits_per_word > 8) {
+                       if (xfer->len % 2) {
+                               dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
+                               return -EINVAL;
+                       }
+               }
+
                /* FIXME implement these protocol options!! */
-               if (xfer->speed_hz) {
-                       dev_dbg(&spi->dev, "no protocol options yet\n");
+               if (xfer->speed_hz < spi->max_speed_hz) {
+                       dev_dbg(&spi->dev, "can't change speed in transfer\n");
                        return -ENOPROTOOPT;
                }
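From a client's point of view, a transfer using the new 16-bit PIO path might look like the sketch below (an assumption-laden example, not part of the patch; the buffer length must stay a multiple of two bytes, as enforced by the check above):

	u16 tx[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
	struct spi_transfer t = {
		.tx_buf        = tx,
		.len           = sizeof(tx),	/* in bytes, 16-bit aligned */
		.bits_per_word = 16,
	};
	struct spi_message m;
	int ret;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	ret = spi_sync(spi, &m);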
 
index 2e8f24a1fb952cbfd86b161ad50ac3e315d0850b..50b13c9b1ab691fd5defcae44b98dc4bfccb5557 100644 (file)
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
        },
        { },
 };
-MODULE_DEVICE_TABLE(of, davini_spi_of_match);
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
 
 /**
  * spi_davinci_get_pdata - Get platform data from DTS binding
index 60cfae51c713818ef630f802db2e2e8c747f98d7..eab593eaaafa00f1c269dda9f60a70de72f1d98b 100644 (file)
@@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
                if ((mask & hspi_read(hspi, SPSR)) == val)
                        return 0;
 
-               msleep(20);
+               udelay(10);
        }
 
        dev_err(hspi->dev, "timeout\n");
index d65c000efe3530c1997e996c406eb5880f4df5a4..09df8e22dba0a146c540248cea5374fa0fb13c92 100644 (file)
@@ -489,11 +489,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
        tegra_sflash_parse_dt(tsd);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "No IO memory resource\n");
-               ret = -ENODEV;
-               goto exit_free_master;
-       }
        tsd->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(tsd->base)) {
                ret = PTR_ERR(tsd->base);
index 35f60bd252dda9e6ce61d474cf317eb301c85ff4..637d728fbeb5b3d2c573baf5c19c72e6dd542a3d 100644 (file)
@@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
        return 0;
 
 err_spi_register_master:
-       free_irq(board_dat->pdev->irq, board_dat);
+       free_irq(board_dat->pdev->irq, data);
 err_request_irq:
        pch_spi_free_resources(board_dat, data);
 err_spi_get_resources:
@@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev,
                pd_dev = platform_device_alloc("pch-spi", i);
                if (!pd_dev) {
                        dev_err(&pdev->dev, "platform_device_alloc failed\n");
+                       retval = -ENOMEM;
                        goto err_platform_device;
                }
                pd_dev_save->pd_save[i] = pd_dev;
index e1d7696074253d9a76a1c1744ad94f973ba96616..34d18dcfa0db3c4da739129b1d1008f18ee06cfc 100644 (file)
@@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
        struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
        u32 ipif_ier;
-       u16 cr;
 
        /* We get here with transmitter inhibited */
 
@@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        xspi->remaining_bytes = t->len;
        INIT_COMPLETION(xspi->done);
 
-       xilinx_spi_fill_tx_fifo(xspi);
 
        /* Enable the transmit empty interrupt, which we use to determine
         * progress on the transmission.
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
                xspi->regs + XIPIF_V123B_IIER_OFFSET);
 
-       /* Start the transfer by not inhibiting the transmitter any longer */
-       cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
-               ~XSPI_CR_TRANS_INHIBIT;
-       xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+       for (;;) {
+               u16 cr;
+               u8 sr;
+
+               xilinx_spi_fill_tx_fifo(xspi);
+
+               /* Start the transfer by not inhibiting the transmitter any
+                * longer
+                */
+               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
+                                                       ~XSPI_CR_TRANS_INHIBIT;
+               xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+               wait_for_completion(&xspi->done);
+
+               /* A transmit has just completed. Process received data and
+                * check for more data to transmit. Always inhibit the
+                * transmitter while the Isr refills the transmit register/FIFO,
+                * or make sure it is stopped if we're done.
+                */
+               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+               xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+                              xspi->regs + XSPI_CR_OFFSET);
+
+               /* Read out all the data from the Rx FIFO */
+               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+               while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+                       xspi->rx_fn(xspi);
+                       sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+               }
 
-       wait_for_completion(&xspi->done);
+               /* See if there is more data to send */
+               if (!xspi->remaining_bytes)
+                       break;
+       }
 
        /* Disable the transmit empty interrupt */
        xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
        xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
 
        if (ipif_isr & XSPI_INTR_TX_EMPTY) {    /* Transmission completed */
-               u16 cr;
-               u8 sr;
-
-               /* A transmit has just completed. Process received data and
-                * check for more data to transmit. Always inhibit the
-                * transmitter while the Isr refills the transmit register/FIFO,
-                * or make sure it is stopped if we're done.
-                */
-               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
-               xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
-                       xspi->regs + XSPI_CR_OFFSET);
-
-               /* Read out all the data from the Rx FIFO */
-               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-               while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
-                       xspi->rx_fn(xspi);
-                       sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-               }
-
-               /* See if there is more data to send */
-               if (xspi->remaining_bytes > 0) {
-                       xilinx_spi_fill_tx_fifo(xspi);
-                       /* Start the transfer by not inhibiting the
-                        * transmitter any longer
-                        */
-                       xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-               } else {
-                       /* No more data to send.
-                        * Indicate the transfer is completed.
-                        */
-                       complete(&xspi->done);
-               }
+               complete(&xspi->done);
        }
 
        return IRQ_HANDLED;
index 163fd802b7aced2217494397297de76862056ea5..32b7bb111eb6b53d9e96808f7bd5fd0982cffd9d 100644 (file)
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
-       spi->cs_gpio = -EINVAL;
+       spi->cs_gpio = -ENOENT;
        device_initialize(&spi->dev);
        return spi;
 }
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
        nb = of_gpio_named_count(np, "cs-gpios");
        master->num_chipselect = max(nb, (int)master->num_chipselect);
 
-       if (nb < 1)
+       /* Return error only for an incorrectly formed cs-gpios property */
+       if (nb == 0 || nb == -ENOENT)
                return 0;
+       else if (nb < 0)
+               return nb;
 
        cs = devm_kzalloc(&master->dev,
                          sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
                return -ENOMEM;
 
        for (i = 0; i < master->num_chipselect; i++)
-               cs[i] = -EINVAL;
+               cs[i] = -ENOENT;
 
        for (i = 0; i < nb; i++)
                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
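Switching the unused chip-select slots from -EINVAL to -ENOENT keeps them invalid GPIO numbers while distinguishing "no GPIO given" from a malformed property. A controller driver typically consumes spi->cs_gpio as in this sketch (my_set_cs is hypothetical):

	static void my_set_cs(struct spi_device *spi, int level)
	{
		if (gpio_is_valid(spi->cs_gpio))	/* false for -ENOENT/-EINVAL */
			gpio_set_value(spi->cs_gpio, level);
	}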
index a3b23644b0fbb42218285c571656a8db2154517d..e753fbe302a750ffa3aa13d3f5b402ad9240a06d 100644 (file)
@@ -54,7 +54,7 @@ static int hex2sprom(u16 *sprom, const char *dump, size_t len,
        while (cnt < sprom_size_words) {
                memcpy(tmp, dump, 4);
                dump += 4;
-               err = strict_strtoul(tmp, 16, &parsed);
+               err = kstrtoul(tmp, 16, &parsed);
                if (err)
                        return err;
                sprom[cnt++] = swab16((u16)parsed);
index 4e8a1794f50a893120c028bf915616216697ada6..aefe820a8005585934f9d80b2d62a69b44555b29 100644 (file)
@@ -72,10 +72,10 @@ source "drivers/staging/sep/Kconfig"
 
 source "drivers/staging/iio/Kconfig"
 
-source "drivers/staging/zram/Kconfig"
-
 source "drivers/staging/zsmalloc/Kconfig"
 
+source "drivers/staging/zram/Kconfig"
+
 source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
index ceb1c643753d9fef288d7d5eda97b5191b7726a7..6dc27dac679d9133052b7aca2ad2ea06cf1d3486 100644 (file)
@@ -264,6 +264,8 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 
        rv = alarm_do_ioctl(file, cmd, &ts);
+       if (rv)
+               return rv;
 
        switch (ANDROID_ALARM_BASE_CMD(cmd)) {
        case ANDROID_ALARM_GET_TIME(0):
@@ -272,7 +274,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }
 
-       return rv;
+       return 0;
 }
 #ifdef CONFIG_COMPAT
 static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
@@ -295,6 +297,8 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
        }
 
        rv = alarm_do_ioctl(file, cmd, &ts);
+       if (rv)
+               return rv;
 
        switch (ANDROID_ALARM_BASE_CMD(cmd)) {
        case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
@@ -303,7 +307,7 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
                break;
        }
 
-       return rv;
+       return 0;
 }
 #endif
 
index b040200a5a5502d7a110e9064d25b10600196065..9bd874789ce5fc93906a78f62a68e542541bd288 100644 (file)
@@ -242,7 +242,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
  * 'log->buffer' which contains the first entry readable by 'euid'
  */
 static size_t get_next_entry_by_uid(struct logger_log *log,
-               size_t off, uid_t euid)
+               size_t off, kuid_t euid)
 {
        while (off != log->w_off) {
                struct logger_entry *entry;
@@ -251,7 +251,7 @@ static size_t get_next_entry_by_uid(struct logger_log *log,
 
                entry = get_entry_header(log, off, &scratch);
 
-               if (entry->euid == euid)
+               if (uid_eq(entry->euid, euid))
                        return off;
 
                next_len = sizeof(struct logger_entry) + entry->len;
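With euid stored as kuid_t, comparisons go through uid_eq() as above; the write path would correspondingly store the namespace-aware current_euid(). A sketch of the read-side check (illustrative, not shown in this hunk):

	static bool entry_visible_to_current(const struct logger_entry *entry)
	{
		return uid_eq(entry->euid, current_euid());	/* both kuid_t */
	}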
index cc6bbd99c8e0326838246557f104d90fd11f9998..70af7d805dff191d744987f30e3b4b4093f3363e 100644 (file)
@@ -66,7 +66,7 @@ struct logger_entry {
        __s32           tid;
        __s32           sec;
        __s32           nsec;
-       uid_t           euid;
+       kuid_t          euid;
        char            msg[0];
 };
 
index 7871579bb83d8177d2ea262c38110446389fe923..87e852a0ef49140a24f72ba37cb051ad793c89d4 100644 (file)
@@ -981,6 +981,7 @@ config COMEDI_ME_DAQ
 
 config COMEDI_NI_6527
        tristate "NI 6527 support"
+       depends on HAS_DMA
        select COMEDI_MITE
        ---help---
          Enable support for the National Instruments 6527 PCI card
@@ -990,6 +991,7 @@ config COMEDI_NI_6527
 
 config COMEDI_NI_65XX
        tristate "NI 65xx static dio PCI card support"
+       depends on HAS_DMA
        select COMEDI_MITE
        ---help---
          Enable support for National Instruments 65xx static dio boards.
@@ -1003,6 +1005,7 @@ config COMEDI_NI_65XX
 
 config COMEDI_NI_660X
        tristate "NI 660x counter/timer PCI card support"
+       depends on HAS_DMA
        select COMEDI_NI_TIOCMD
        ---help---
          Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
@@ -1013,6 +1016,7 @@ config COMEDI_NI_660X
 
 config COMEDI_NI_670X
        tristate "NI 670x PCI card support"
+       depends on HAS_DMA
        select COMEDI_MITE
        ---help---
          Enable support for National Instruments PCI-6703 and PCI-6704
@@ -1022,6 +1026,7 @@ config COMEDI_NI_670X
 
 config COMEDI_NI_LABPC_PCI
        tristate "NI Lab-PC PCI-1200 support"
+       depends on HAS_DMA
        select COMEDI_NI_LABPC
        select COMEDI_MITE
        ---help---
@@ -1032,6 +1037,7 @@ config COMEDI_NI_LABPC_PCI
 
 config COMEDI_NI_PCIDIO
        tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support"
+       depends on HAS_DMA
        select COMEDI_MITE
        select COMEDI_8255
        ---help---
@@ -1043,6 +1049,7 @@ config COMEDI_NI_PCIDIO
 
 config COMEDI_NI_PCIMIO
        tristate "NI PCI-MIO-E series and M series support"
+       depends on HAS_DMA
        select COMEDI_NI_TIOCMD
        select COMEDI_8255
        select COMEDI_FC
@@ -1095,10 +1102,12 @@ config COMEDI_SSV_DNP
          called ssv_dnp.
 
 config COMEDI_MITE
+       depends on HAS_DMA
        tristate
 
 config COMEDI_NI_TIOCMD
        tristate
+       depends on HAS_DMA
        select COMEDI_NI_TIO
        select COMEDI_MITE
 
index ca709901fb3e841c27c9fc2a69133d3975cfece1..d4be0e68509b7f6a0b54d6bf7c412105349acdc6 100644 (file)
@@ -51,10 +51,12 @@ static void __comedi_buf_free(struct comedi_device *dev,
                        clear_bit(PG_reserved,
                                  &(virt_to_page(buf->virt_addr)->flags));
                        if (s->async_dma_dir != DMA_NONE) {
+#ifdef CONFIG_HAS_DMA
                                dma_free_coherent(dev->hw_dev,
                                                  PAGE_SIZE,
                                                  buf->virt_addr,
                                                  buf->dma_addr);
+#endif
                        } else {
                                free_page((unsigned long)buf->virt_addr);
                        }
@@ -74,6 +76,12 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
        struct comedi_buf_page *buf;
        unsigned i;
 
+       if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
+               dev_err(dev->class_dev,
+                       "dma buffer allocation not supported\n");
+               return;
+       }
+
        async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
        if (async->buf_page_list)
                pages = vmalloc(sizeof(struct page *) * n_pages);
@@ -84,11 +92,15 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
        for (i = 0; i < n_pages; i++) {
                buf = &async->buf_page_list[i];
                if (s->async_dma_dir != DMA_NONE)
+#ifdef CONFIG_HAS_DMA
                        buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
                                                            PAGE_SIZE,
                                                            &buf->dma_addr,
                                                            GFP_KERNEL |
                                                            __GFP_COMP);
+#else
+                       break;
+#endif
                else
                        buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
                if (!buf->virt_addr)
index 00f2547024ec6b1a2d2abf6abf52dbcc1697dbc8..924c54c9c31fad595d1cdf9331db17d8c40413f4 100644 (file)
@@ -246,9 +246,6 @@ static int resize_async_buffer(struct comedi_device *dev,
                return -EBUSY;
        }
 
-       if (!async->prealloc_buf)
-               return -EINVAL;
-
        /* make sure buffer is an integral number of pages
         * (we round up) */
        new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
index 3d978f34d212b4c9f925ad928496403f8039c40f..77a7bb63258034ae0dd2b88bd692a0b6da9fe317 100644 (file)
@@ -976,8 +976,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
                /* clear flip-flop to make sure 2-byte registers for
                 * count and address get set correctly */
                clear_dma_ff(devpriv->dma_chan);
-               set_dma_addr(devpriv->dma_chan,
-                            virt_to_bus(devpriv->dma_buffer));
+               set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
                /*  set appropriate size of transfer */
                devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
                if (cmd->stop_src == TRIG_COUNT &&
@@ -1089,7 +1088,7 @@ static void labpc_drain_dma(struct comedi_device *dev)
                devpriv->count -= num_points;
 
        /*  set address and count for next transfer */
-       set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer));
+       set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
        set_dma_count(devpriv->dma_chan, leftover * sample_size);
        release_dma_lock(flags);
 
@@ -1741,6 +1740,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
                                unsigned long dma_flags;
 
                                devpriv->dma_chan = dma_chan;
+                               devpriv->dma_addr =
+                                       virt_to_bus(devpriv->dma_buffer);
+
                                dma_flags = claim_dma_lock();
                                disable_dma(devpriv->dma_chan);
                                set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
index 615f16f271c0a8078a1a6a22339393ffdeddf2ed..4b691f5a9965098cd6edebb5be04d940ea65af19 100644 (file)
@@ -82,6 +82,7 @@ struct labpc_private {
        unsigned int divisor_b1;
        unsigned int dma_chan;  /*  dma channel to use */
        u16 *dma_buffer;        /*  buffer ai will dma into */
+       phys_addr_t dma_addr;
        /* transfer size in bytes for current transfer */
        unsigned int dma_transfer_size;
        /* we are using dma/fifo-half-full/etc. */
index a46d579016d9571eef12db6bff2e95beea47e4d2..8c5dee9b3b05188c85110ee171fb179d354bd858 100644 (file)
@@ -310,9 +310,11 @@ static int ni_gpct_insn_read(struct comedi_device *dev,
 static int ni_gpct_insn_config(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               struct comedi_insn *insn, unsigned int *data);
+#ifdef PCIDMA
 static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
 static int ni_gpct_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd);
+#endif
 static int ni_gpct_cancel(struct comedi_device *dev,
                          struct comedi_subdevice *s);
 static void handle_gpct_interrupt(struct comedi_device *dev,
@@ -4617,9 +4619,7 @@ static int ni_E_init(struct comedi_device *dev)
        for (j = 0; j < NUM_GPCT; ++j) {
                s = &dev->subdevices[NI_GPCT_SUBDEV(j)];
                s->type = COMEDI_SUBD_COUNTER;
-               s->subdev_flags =
-                   SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ
-                   /* | SDF_CMD_WRITE */ ;
+               s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
                s->n_chan = 3;
                if (board->reg_type & ni_reg_m_series_mask)
                        s->maxdata = 0xffffffff;
@@ -4628,11 +4628,14 @@ static int ni_E_init(struct comedi_device *dev)
                s->insn_read = &ni_gpct_insn_read;
                s->insn_write = &ni_gpct_insn_write;
                s->insn_config = &ni_gpct_insn_config;
+#ifdef PCIDMA
+               s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
                s->do_cmd = &ni_gpct_cmd;
                s->len_chanlist = 1;
                s->do_cmdtest = &ni_gpct_cmdtest;
                s->cancel = &ni_gpct_cancel;
                s->async_dma_dir = DMA_BIDIRECTIONAL;
+#endif
                s->private = &devpriv->counter_dev->counters[j];
 
                devpriv->counter_dev->counters[j].chip_index = 0;
@@ -5216,10 +5219,10 @@ static int ni_gpct_insn_write(struct comedi_device *dev,
        return ni_tio_winsn(counter, insn, data);
 }
 
+#ifdef PCIDMA
 static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 {
        int retval;
-#ifdef PCIDMA
        struct ni_gpct *counter = s->private;
 /* const struct comedi_cmd *cmd = &s->async->cmd; */
 
@@ -5233,23 +5236,20 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
        ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
        retval = ni_tio_cmd(counter, s->async);
-#else
-       retval = -ENOTSUPP;
-#endif
        return retval;
 }
+#endif
 
+#ifdef PCIDMA
 static int ni_gpct_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
-#ifdef PCIDMA
        struct ni_gpct *counter = s->private;
 
        return ni_tio_cmdtest(counter, cmd);
-#else
        return -ENOTSUPP;
-#endif
 }
+#endif
 
 static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
 {
index a0177d998978fe25524a333fc557bdada13d2c42..d49cdf84a496d405568024bd39c2f21577cab68f 100644 (file)
@@ -2891,7 +2891,7 @@ void uf_net_get_name(struct net_device *dev, char *name, int len)
  */
 static int
 uf_netdev_event(struct notifier_block *notif, unsigned long event, void* ptr) {
-    struct net_device *netdev = ptr;
+    struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
     netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(netdev);
     unifi_priv_t *priv = NULL;
     static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
index f0b4739c65a17eeff63facaf8158cfedbe0ba025..d15d9d58e5ac2a3022c92e9843f7de7b64685f1f 100644 (file)
@@ -2,7 +2,6 @@ config USB_DWC2
        tristate "DesignWare USB2 DRD Core Support"
        depends on USB
        depends on VIRT_TO_BUS
-       select USB_OTG_UTILS
        help
          Say Y or M here if your system has a Dual Role HighSpeed
          USB controller based on the DesignWare HSOTG IP Core.
@@ -39,6 +38,7 @@ config USB_DWC2_TRACK_MISSED_SOFS
        bool "Enable Missed SOF Tracking"
        help
          Say Y here to enable logging of missed SOF events to the dmesg log.
+         WARNING: This feature is still experimental.
          If in doubt, say N.
 
 config USB_DWC2_DEBUG_PERIODIC
index 827ab781ae9bd68f8aca7f2176babdc00cf1695e..8551ccedf0376284c4d81648db0c2cbddde6d946 100644 (file)
@@ -2804,9 +2804,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
 
        /* Set device flags indicating whether the HCD supports DMA */
        if (hsotg->core_params->dma_enable > 0) {
-               if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
-                       dev_warn(hsotg->dev,
-                                "can't enable workaround for >2GB RAM\n");
+               if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+                       dev_warn(hsotg->dev, "can't set DMA mask\n");
                if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
                        dev_warn(hsotg->dev,
                                 "can't enable workaround for >2GB RAM\n");
index 6e5dbed6ccec06d8427b995f88bad428d6fcdf15..e24062f0a49ebd87ec2d30b06f914309d21173c0 100644 (file)
@@ -56,8 +56,6 @@
 static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
 {
 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
-#warning Compiling code to track missed SOFs
-
        u16 curr_frame_number = hsotg->frame_number;
 
        if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
index 1f3d581a10787154acccc04d3bb5a9274731996b..44cce2fa6361194a36ac72dff5825575fd324bf5 100644 (file)
@@ -95,6 +95,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
 
        hsotg->dev = &dev->dev;
 
+       /*
+        * Use reasonable defaults so platforms don't have to provide these.
+        */
+       if (!dev->dev.dma_mask)
+               dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+       if (!dev->dev.coherent_dma_mask)
+               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
        irq = platform_get_irq(dev, 0);
        if (irq < 0) {
                dev_err(&dev->dev, "missing IRQ resource\n");
@@ -102,11 +110,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
        }
 
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&dev->dev, "missing memory base resource\n");
-               return -EINVAL;
-       }
-
        hsotg->regs = devm_ioremap_resource(&dev->dev, res);
        if (IS_ERR(hsotg->regs))
                return PTR_ERR(hsotg->regs);
index 94e426e4d98b0177b72065bf630c4eae1b490925..b2330f1df7e7066ab15aae5707a79f6fc7b32b2e 100644 (file)
@@ -164,7 +164,7 @@ static const struct file_operations ft1000_proc_fops = {
 static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
                                void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ft1000_info *info;
 
        info = netdev_priv(dev);
index eca6f0292b4bcc9d2c9829cff401d5e0bd645ab8..5ead942be680c38962c251349033e44faec9c807 100644 (file)
@@ -166,7 +166,7 @@ static const struct file_operations ft1000_proc_fops = {
 static int
 ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ft1000_info *info;
        struct proc_dir_entry *ft1000_proc_file;
 
index 3c18efe3136575ab20559923881ad44da9bd0835..69059138de4ab334b96f44b63599be2ddceed7e9 100644 (file)
@@ -39,7 +39,7 @@ if WIMAX_GDM72XX_USB
 
 config WIMAX_GDM72XX_USB_PM
        bool "Enable power managerment support"
-       depends on USB_SUSPEND
+       depends on PM_RUNTIME
 
 endif # WIMAX_GDM72XX_USB
 
index 2856b8fd44adf808f8ca871f1960f7dec2272b93..163c638e4095f3b893668bb5d83e8e2110b76afb 100644 (file)
@@ -690,7 +690,6 @@ static void mxs_lradc_trigger_remove(struct iio_dev *iio)
 static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
 {
        struct mxs_lradc *lradc = iio_priv(iio);
-       struct iio_buffer *buffer = iio->buffer;
        int ret = 0, chan, ofs = 0;
        unsigned long enable = 0;
        uint32_t ctrl4_set = 0;
@@ -698,7 +697,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
        uint32_t ctrl1_irq = 0;
        const uint32_t chan_value = LRADC_CH_ACCUMULATE |
                ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
-       const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS);
+       const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS);
 
        if (!len)
                return -EINVAL;
@@ -725,7 +724,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
                lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
        writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
 
-       for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) {
+       for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
                ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
                ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
                ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
index d060f2572512af496f094036cfbc9460a19850d3..c99f890cc6c65ae9ea13a9aafa8c8258e21e3cb8 100644 (file)
@@ -1869,6 +1869,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
                dev_info(&chip->client->dev,
                                "%s: i2c device found does not match expected id\n",
                                __func__);
+               ret = -EINVAL;
                goto fail1;
        }
 
@@ -1907,7 +1908,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
                if (ret) {
                        dev_err(&clientp->dev,
                                "%s: irq request failed", __func__);
-                       goto fail2;
+                       goto fail1;
                }
        }
 
@@ -1920,17 +1921,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
        if (ret) {
                dev_err(&clientp->dev,
                        "%s: iio registration failed\n", __func__);
-               goto fail1;
+               goto fail2;
        }
 
        dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
 
        return 0;
 
-fail1:
+fail2:
        if (clientp->irq)
                free_irq(clientp->irq, indio_dev);
-fail2:
+fail1:
        iio_device_free(indio_dev);
 
        return ret;
index 8c9e40390f42fae3b0f52d3d989d79891d2778f1..ef699f75318636db921fd693ee798d6bedffc7dc 100644 (file)
@@ -1,6 +1,7 @@
 config DRM_IMX
        tristate "DRM Support for Freescale i.MX"
        select DRM_KMS_HELPER
+       select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
        depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
@@ -19,10 +20,12 @@ config DRM_IMX_FB_HELPER
 config DRM_IMX_PARALLEL_DISPLAY
        tristate "Support for parallel displays"
        depends on DRM_IMX
+       select VIDEOMODE_HELPERS
 
 config DRM_IMX_TVE
        tristate "Support for TV and VGA displays"
        depends on DRM_IMX
+       select REGMAP_MMIO
        help
          Choose this to enable the internal Television Encoder (TVe)
          found on i.MX53 processors.
@@ -30,6 +33,7 @@ config DRM_IMX_TVE
 config DRM_IMX_IPUV3_CORE
        tristate "IPUv3 core support"
        depends on DRM_IMX
+       depends on RESET_CONTROLLER
        help
          Choose this if you have a i.MX5/6 system and want
          to use the IPU. This option only enables IPU base
@@ -38,5 +42,6 @@ config DRM_IMX_IPUV3_CORE
 config DRM_IMX_IPUV3
        tristate "DRM Support for i.MX IPUv3"
        depends on DRM_IMX
+       depends on DRM_IMX_IPUV3_CORE
        help
          Choose this if you have a i.MX5 or i.MX6 processor.
index ac16344644073340cdce7f350134791e58136f62..03892de9bd7e1103665c2144212722436b97b8be 100644 (file)
@@ -670,7 +670,9 @@ static int imx_tve_probe(struct platform_device *pdev)
        tve->dac_reg = devm_regulator_get(&pdev->dev, "dac");
        if (!IS_ERR(tve->dac_reg)) {
                regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
-               regulator_enable(tve->dac_reg);
+               ret = regulator_enable(tve->dac_reg);
+               if (ret)
+                       return ret;
        }
 
        tve->clk = devm_clk_get(&pdev->dev, "tve");
index ea61c869110f053d68633e15d761fef0541d4de9..ff5c63350932b32f7bd05aa277733d0a77cb2b1f 100644 (file)
@@ -316,31 +316,14 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
 
 static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
 {
-       struct drm_pending_vblank_event *e;
-       struct timeval now;
        unsigned long flags;
        struct drm_device *drm = ipu_crtc->base.dev;
 
        spin_lock_irqsave(&drm->event_lock, flags);
-
-       e = ipu_crtc->page_flip_event;
-       if (!e) {
-               spin_unlock_irqrestore(&drm->event_lock, flags);
-               return;
-       }
-
-       do_gettimeofday(&now);
-       e->event.sequence = 0;
-       e->event.tv_sec = now.tv_sec;
-       e->event.tv_usec = now.tv_usec;
+       if (ipu_crtc->page_flip_event)
+               drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
        ipu_crtc->page_flip_event = NULL;
-
        imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-       list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-
-       wake_up_interruptible(&e->base.file_priv->event_wait);
-
        spin_unlock_irqrestore(&drm->event_lock, flags);
 }
 
index ec32776ff5475310c37074c8ed2e5cf5cabd3d06..df6569b997b88269bfdb48fd17dc410a0da50c0a 100644 (file)
@@ -1,6 +1,7 @@
 config SOLO6X10
        tristate "Softlogic 6x10 MPEG codec cards"
        depends on PCI && VIDEO_DEV && SND && I2C
+       depends on FONTS
        select VIDEOBUF2_DMA_SG
        select VIDEOBUF2_DMA_CONTIG
        select SND_PCM
index a88959f9a07ab09a82121f81246188d4d1e8aad5..197c393c4ca752a9f50062e2852afd2bb8be89a3 100644 (file)
@@ -123,6 +123,20 @@ int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
 }
 EXPORT_SYMBOL_GPL(nvec_register_notifier);
 
+/**
+ * nvec_unregister_notifier - Unregister a notifier with nvec
+ * @nvec: A &struct nvec_chip
+ * @nb: The notifier block to unregister
+ *
+ * Unregisters a notifier with @nvec. The notifier will be removed from the
+ * atomic notifier chain.
+ */
+int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
+
 /**
  * nvec_status_notifier - The final notifier
  *
@@ -185,7 +199,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
  *
  * Free the given message
  */
-inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
+void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
 {
        if (msg != &nvec->tx_scratch)
                dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
@@ -800,11 +814,6 @@ static int tegra_nvec_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no mem resource?\n");
-               return -ENODEV;
-       }
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
@@ -815,7 +824,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       i2c_clk = clk_get(&pdev->dev, "div-clk");
+       i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
        if (IS_ERR(i2c_clk)) {
                dev_err(nvec->dev, "failed to get controller clock\n");
                return -ENODEV;
@@ -902,8 +911,11 @@ static int tegra_nvec_remove(struct platform_device *pdev)
 
        nvec_toggle_global_events(nvec, false);
        mfd_remove_devices(nvec->dev);
+       nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
        cancel_work_sync(&nvec->rx_work);
        cancel_work_sync(&nvec->tx_work);
+       /* FIXME: needs to check whether nvec is responsible for power off */
+       pm_power_off = NULL;
 
        return 0;
 }
index b7a14bc0ab9153319b58bad6f30f7e538d3fc775..2b1316d87470eaa9f3c84b775854bc56d10da3ab 100644 (file)
@@ -197,9 +197,8 @@ extern int nvec_register_notifier(struct nvec_chip *nvec,
                                  struct notifier_block *nb,
                                  unsigned int events);
 
-extern int nvec_unregister_notifier(struct device *dev,
-                                   struct notifier_block *nb,
-                                   unsigned int events);
+extern int nvec_unregister_notifier(struct nvec_chip *dev,
+                                   struct notifier_block *nb);
 
 extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
 
index 7445ce6422bbc9e868b0274c3dd65bce4b3b4a08..a0ec52a4114f277938ba274b9e4cfe8632a749d7 100644 (file)
@@ -169,8 +169,15 @@ fail:
 
 static int nvec_kbd_remove(struct platform_device *pdev)
 {
+       struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+       char disable_kbd[] = { NVEC_KBD, DISABLE_KBD },
+            uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING,
+                                               false };
+       nvec_write_async(nvec, uncnfg_wake_key_reporting, 3);
+       nvec_write_async(nvec, disable_kbd, 2);
+       nvec_unregister_notifier(nvec, &keys_dev.notifier);
+
        input_unregister_device(keys_dev.input);
-       input_free_device(keys_dev.input);
 
        return 0;
 }
@@ -188,4 +195,5 @@ module_platform_driver(nvec_kbd_driver);
 
 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
 MODULE_DESCRIPTION("NVEC keyboard driver");
+MODULE_ALIAS("platform:nvec-kbd");
 MODULE_LICENSE("GPL");
index 296f7b9a8c8c11f32d8eb35612bd75a1cabe1b4b..aacfcd6954a371ab23e47491230818a644b7e772 100644 (file)
@@ -414,6 +414,7 @@ static int nvec_power_remove(struct platform_device *pdev)
        struct nvec_power *power = platform_get_drvdata(pdev);
 
        cancel_delayed_work_sync(&power->poller);
+       nvec_unregister_notifier(power->nvec, &power->notifier);
        switch (pdev->id) {
        case AC:
                power_supply_unregister(&nvec_psy);
index aff6b9b9f9aa76b4c460ad6e5fe410e56f72f09d..06dbb02085a936c52ee97bec1787da0ec92179d4 100644 (file)
@@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
        struct serio *ser_dev;
        char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
 
-       ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+       ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
        if (ser_dev == NULL)
                return -ENOMEM;
 
@@ -133,6 +133,11 @@ static int nvec_mouse_probe(struct platform_device *pdev)
 
 static int nvec_mouse_remove(struct platform_device *pdev)
 {
+       struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+       ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE);
+       ps2_stopstreaming(ps2_dev.ser_dev);
+       nvec_unregister_notifier(nvec, &ps2_dev.notifier);
        serio_unregister_port(ps2_dev.ser_dev);
 
        return 0;
@@ -179,4 +184,5 @@ module_platform_driver(nvec_mouse_driver);
 
 MODULE_DESCRIPTION("NVEC mouse driver");
 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_ALIAS("platform:nvec-mouse");
 MODULE_LICENSE("GPL");
index 71f5cde9ed1c1ae775f993917662f81bdb1b1c19..a18430e2abbd576b45ef8b38cb44838701caf85e 100644 (file)
@@ -1271,8 +1271,8 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
                /* Subframe drv Tx descriptor and firmware info setting */
                skb = pSendList->tx_agg_frames[i];
                tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-               tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)agg_skb->tail;
-               tx_fwinfo = (tx_fwinfo_819x_usb *)(agg_skb->tail + sizeof(tx_desc_819x_usb_aggr_subframe));
+               tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)skb_tail_pointer(agg_skb);
+               tx_fwinfo = (tx_fwinfo_819x_usb *)(skb_tail_pointer(agg_skb) + sizeof(tx_desc_819x_usb_aggr_subframe));
 
                memset(tx_fwinfo,0,sizeof(tx_fwinfo_819x_usb));
                /* DWORD 0 */
index 185b676d858ad627a737619417a04ba0dd08191f..aab945a316ea47753d82130bb4043d9f7df7b255 100644 (file)
@@ -1,6 +1,6 @@
 config DX_SEP
        tristate "Discretix SEP driver"
-       depends on PCI
+       depends on PCI && CRYPTO
        help
          Discretix SEP driver; used for the security processor subsystem
          on board the Intel Mobile Internet Device and adds SEP availability
index eda2e7d73645d785ea4a825ebffd41ad223b0bf6..6651bd819bc8138db36be845223bde5d7f1759d4 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_SILICOM
        bool "Silicom devices"
        default y
-       depends on PCI
+       depends on PCI && NETDEVICES
        ---help---
          If you have a network card (Ethernet) belonging to this class,
          say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_SILICOM
 
 config SBYPASS
        tristate "Silicom BypassCTL library support"
-       depends on PCI && NET
+       depends on PCI
        depends on m
        ---help---
          If you have a network (Ethernet) controller of this type, say Y
@@ -29,10 +29,9 @@ config SBYPASS
 
 config BPCTL
        tristate "Silicom BypassCTL net support"
-       depends on PCI && NET
+       depends on PCI
        depends on m
        select SBYPASS
-       select NET_CORE
        select MII
        ---help---
          If you have a network (Ethernet) controller of this type, say Y
index b7e570ccb759dd4514a17206238b87dbe0299282..c8ddb99e852686205538a62dcc6fd3afe9688863 100644 (file)
@@ -133,7 +133,7 @@ static unsigned long str_to_hex(char *p);
 static int bp_device_event(struct notifier_block *unused,
                           unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        static bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
        int dev_num = 0, ret = 0, ret_d = 0, time_left = 0;
        /* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */
index fe667dde43ce4b0180c0f6cbfcb526debfaed719..386362c9964ff1e8856adcaa14fa000aa82686f5 100644 (file)
@@ -1087,7 +1087,11 @@ static int synaptics_rmi4_resume(struct device *dev)
        unsigned char intr_status;
        struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
 
-       regulator_enable(rmi4_data->regulator);
+       retval = regulator_enable(rmi4_data->regulator);
+       if (retval) {
+               dev_err(dev, "Regulator enable failed (%d)\n", retval);
+               return retval;
+       }
 
        enable_irq(rmi4_data->i2c_client->irq);
        rmi4_data->touch_stopped = false;
index f4f1bf7a30fdff871cedd0cceaeb95b2cd8c24c7..c699a3058b39920a34daf1a6f63524be990e29cb 100644 (file)
@@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
                       pDevice->dev->name, pDevice->apdev->name);
        }
-       kfree(pDevice->apdev);
+       free_netdev(pDevice->apdev);
        pDevice->apdev = NULL;
     pDevice->bEnable8021x = false;
     pDevice->bEnableHostWEP = false;
index c335808211ee54b929441028e738797d457ead86..d0cf7d8a20e5640fe507f3ff1b1dc64c170f9408 100644 (file)
@@ -1345,9 +1345,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
                return rc;
        }
 
+       spin_lock_irq(&pDevice->lock);
+
        if (wrq->disabled) {
                pDevice->ePSMode = WMAC_POWER_CAM;
                PSvDisablePowerSaving(pDevice);
+               spin_unlock_irq(&pDevice->lock);
                return rc;
        }
        if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
@@ -1358,6 +1361,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
                pDevice->ePSMode = WMAC_POWER_FAST;
                PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
        }
+
+       spin_unlock_irq(&pDevice->lock);
+
        switch (wrq->flags & IW_POWER_MODE) {
        case IW_POWER_UNICAST_R:
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
index e1f91d5a0f6a4d74f6f4864b85d16e134ae046a2..a858666eae68f9fd4e4eead1e22948404ae62935 100644 (file)
 #ifndef _ZCACHE_RAMSTER_H_
 #define _ZCACHE_RAMSTER_H_
 
-#ifdef CONFIG_RAMSTER_MODULE
-#define CONFIG_RAMSTER
-#endif
-
 #ifdef CONFIG_RAMSTER
 #include "ramster/ramster.h"
 #else
index 327e4f0d98e1a57680a557ea08401f43959454f1..5b26ee977c2f34823e12cfa86e10153902d0206b 100644 (file)
@@ -1,6 +1,8 @@
 #include <linux/atomic.h>
 #include "debug.h"
 
+ssize_t ramster_foreign_eph_pages;
+ssize_t ramster_foreign_pers_pages;
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 
diff --git a/drivers/staging/zcache/ramster/ramster-howto.txt b/drivers/staging/zcache/ramster/ramster-howto.txt
new file mode 100644 (file)
index 0000000..7b1ee3b
--- /dev/null
@@ -0,0 +1,366 @@
+                       RAMSTER HOW-TO
+
+Author: Dan Magenheimer
+Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com>
+
+This is a HOWTO document for ramster which, as of this writing, is in
+the kernel as a subdirectory of zcache in drivers/staging, called ramster.
+(Zcache can be built with or without ramster functionality.)  If enabled
+and properly configured, ramster allows memory capacity load balancing
+across multiple machines in a cluster.  Further, the ramster code serves
+as an example of asynchronous access for zcache (as well as cleancache and
+frontswap) that may prove useful for future transcendent memory
+implementations, such as KVM and NVRAM.  While ramster works today on
+any network connection that supports kernel sockets, its features may
+become more interesting on future high-speed fabrics/interconnects.
+
+Ramster requires both kernel and userland support.  The userland support,
+called ramster-tools, is known to work with EL6-based distros, but is a
+set of poorly-hacked slightly-modified cluster tools based on ocfs2, which
+includes an init file, a config file, and a userland binary that interfaces
+to the kernel.  This state of userland support reflects the abysmal userland
+skills of this suitably-embarrassed author; any help/patches to turn
+ramster-tools into more distributable rpms/debs useful for a wider range
+of distros would be appreciated.  The source RPM that can be used as a
+starting point is available at:
+    http://oss.oracle.com/projects/tmem/files/RAMster/ 
+
+As a result of this author's ignorance, userland setup described in this
+HOWTO assumes an EL6 distro and is described in EL6 syntax.  Apologies
+if this offends anyone!
+
+Kernel support has only been tested on x86_64.  Systems with an active
+ocfs2 filesystem should work, but since ramster leverages a lot of
+code from ocfs2, there may be latent issues.  A kernel configuration that
+includes CONFIG_OCFS2_FS should build OK, and should certainly run OK
+if no ocfs2 filesystem is mounted.
+
+This HOWTO demonstrates memory capacity load balancing for a two-node
+cluster, where one node called the "local" node becomes overcommitted
+and the other node called the "remote" node provides additional RAM
+capacity for use by the local node.  Ramster is capable of more complex
+topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES".
+
+If you find any terms in this HOWTO unfamiliar or don't understand the
+motivation for ramster, the following LWN reading is recommended:
+-- Transcendent Memory in a Nutshell (lwn.net/Articles/454795)
+-- The future calculus of memory management (lwn.net/Articles/475681)
+And since ramster is built on top of zcache, this article may be helpful:
+-- In-kernel memory compression (lwn.net/Articles/545244)
+
+Now that you've memorized the contents of those articles, let's get started!
+
+A. PRELIMINARY
+
+1) Install two x86_64 Linux systems that are known to work when
+   upgraded to a recent upstream Linux kernel version.
+
+On each system:
+
+2) Configure, build and install, then boot Linux, just to ensure it
+   can be done with an unmodified upstream kernel.  Confirm you booted
+   the upstream kernel with "uname -a".
+
+3) Unless you plan to test only swapping, or if you plan to do any
+   performance testing, the "WasActive" patch is also highly recommended.
+   (Search lkml.org for WasActive, apply the patch, rebuild your kernel.)
+   For a demo or simple testing, the patch can be ignored.
+
+4) Install ramster-tools as root.  An x86_64 rpm for EL6-based systems
+   can be found at:
+    http://oss.oracle.com/projects/tmem/files/RAMster/ 
+   (Sorry but for now, non-EL6 users must recreate ramster-tools on
+   their own from source.  See above.)
+
+5) Ensure that debugfs is mounted at each boot.  Examples below assume it
+   is mounted at /sys/kernel/debug.
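+
+   A minimal sketch of how to arrange this (plain mount/fstab usage, not
+   ramster-specific; adapt to your distro's conventions):
+
+       # mount -t debugfs none /sys/kernel/debug
+       # echo "debugfs /sys/kernel/debug debugfs defaults 0 0" >> /etc/fstab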
+
+B. BUILDING RAMSTER INTO THE KERNEL
+
+Do the following on each system:
+
+1) Using the kernel configuration mechanism of your choice, change
+   your config to include:
+
+       CONFIG_CLEANCACHE=y
+       CONFIG_FRONTSWAP=y
+       CONFIG_STAGING=y
+       CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m
+       CONFIG_ZCACHE=y
+       CONFIG_RAMSTER=y
+
+   For a linux-3.10 or later kernel, you should also set:
+
+       CONFIG_ZCACHE_DEBUG=y
+       CONFIG_RAMSTER_DEBUG=y
+
+   Before building the kernel please doublecheck your kernel config
+   file to ensure all of the settings are correct.
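+
+   One quick way to doublecheck (assuming you build in the source tree, so
+   .config is in the current directory):
+
+       # grep -E "CLEANCACHE|FRONTSWAP|ZCACHE|RAMSTER|CONFIGFS" .config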
+
+2) Build this kernel and change your boot file (e.g. /etc/grub.conf)
+   so that the new kernel will boot.
+
+3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel.
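+
+   For example, the kernel line in /etc/grub.conf might end up looking
+   something like the following (illustrative only; keep whatever root=,
+   console= and other options your system already uses):
+
+       kernel /vmlinuz-3.10.0 ro root=/dev/sda1 quiet zcache ramster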
+
+4) Reboot each system approximately simultaneously.
+
+5) Check dmesg to ensure there are some messages from ramster, prefixed
+   by "ramster:"
+
+       # dmesg | grep ramster
+
+   You should also see a lot of files in:
+
+       # ls /sys/kernel/debug/zcache
+       # ls /sys/kernel/debug/ramster
+
+   These are mostly counters for various zcache and ramster activities.
+   You should also see files in:
+
+       # ls /sys/kernel/mm/ramster
+
+   These are sysfs files that control ramster as we shall see.
+
+   Ramster now will act as a single-system zcache on each system
+   but doesn't yet know anything about the cluster so can't yet do
+   anything remotely.
+
+C. CONFIGURING THE RAMSTER CLUSTER
+
+This part can be error prone unless you are familiar with clustering
+filesystems.  We need to describe the cluster in a /etc/ramster.conf
+file, and the init scripts that parse it are extremely picky about
+the syntax.
+
+1) Create a /etc/ramster.conf file and ensure it is identical on both
+   systems.  This file mimics the ocfs2 format and there is a good amount
+   of documentation that can be searched for ocfs2.conf, but you can use:
+
+       cluster:
+               name = ramster
+               node_count = 2
+       node:
+               name = system1
+               cluster = ramster
+               number = 0
+               ip_address = my.ip.ad.r1
+               ip_port = 7777
+       node:
+               name = system2
+               cluster = ramster
+               number = 1
+               ip_address = my.ip.ad.r2
+               ip_port = 7777
+
+   You must ensure that the "name" field in the file exactly matches
+   the output of "hostname" on each system; if "hostname" shows a
+   fully-qualified hostname, ensure the name is fully qualified in
+   /etc/ramster.conf.  Obviously, substitute my.ip.ad.rx with proper
+   ip addresses.
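+
+   A quick sanity check on each node (nothing ramster-specific, just the
+   standard hostname tool; "system1" here is the illustrative name from
+   the sample file above):
+
+       # hostname
+       system1
+       # grep "name = system1" /etc/ramster.conf
+
+   The grep should find exactly one node stanza on each system.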
+
+2) Enable the ramster service and configure it.  If you used the
+   EL6 ramster-tools, this would be:
+
+       # chkconfig --add ramster
+       # service ramster configure
+
+   Set "load on boot" to "y", cluster to start is "ramster" (or whatever
+   name you chose in ramster.conf), heartbeat dead threshold as "500",
+   network idle timeout as "1000000".  Leave the others as default.
+
+3) Reboot both systems.  After reboot, try (assuming EL6 ramster-tools):
+
+       # service ramster status
+
+   You should see "Checking RAMSTER cluster "ramster": Online".  If you do
+   not, something is wrong and ramster will not work.  Note that you
+   should also see that the driver for "configfs" is loaded and mounted,
+   the driver for ocfs2_dlmfs is not loaded, and some numbers for network
+   parameters.  You will also see "Checking RAMSTER heartbeat: Not active".
+   That's all OK.
+
+4) Now you need to start the cluster heartbeat; the cluster is not "up"
+   until all nodes detect a heartbeat.  In a real cluster, heartbeat detection
+   is done via a cluster filesystem, but ramster doesn't require one.  Some
+   hack-y kernel code in ramster can start the heartbeat for you though if
+   you tell it what nodes are "up".  To enable the heartbeat, do:
+
+       # echo 0 > /sys/kernel/mm/ramster/manual_node_up
+       # echo 1 > /sys/kernel/mm/ramster/manual_node_up
+
+   This must be done on BOTH nodes and, to avoid timeouts, must be done
+   approximately concurrently on both nodes.  On an EL6 system, it is
+   convenient to put these lines in /etc/rc.local.  To confirm that the
+   cluster is now up, on both systems do:
+
+       # dmesg | grep ramster
+
+   You should see ramster "Accepted connection" messages in dmesg on both
+   nodes after this.  Note that if you check userland status again with
+
+       # service ramster status
+
+   you will still see "Checking RAMSTER heartbeat: Not active".  That's
+   still OK... the ramster kernel heartbeat hack doesn't communicate to
+   userland.
+
+5) You now must tell each node the node to which it should "remotify" pages.
+   On this two node cluster, we will assume the "local" node, node 0, has
+   memory overcommitted and will use ramster to utilize RAM capacity on
+   the "remote node", node 1.  To configure this, on node 0, you do:
+
+       # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+   You should see "ramster: node 1 set as remotification target" in dmesg
+   on node 0.  Again, on EL6, /etc/rc.local is a good place to put this
+   on node 0 so you don't forget to do it at each boot.
+
+6) One more step:  By default, the ramster code does not "remotify" any
+   pages; this is primarily for testing purposes, but sometimes it is
+   useful.  This may change in the future, but for now, on node 0, you do:
+
+       # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
+       # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
+
+   The first enables remotifying swap (persistent, aka frontswap) pages,
+   the second enables remotifying of page cache (ephemeral, cleancache)
+   pages.
+
+   On EL6, these lines can also be put in /etc/rc.local (AFTER the
+   node_up lines), or at the beginning of a script that runs a workload.
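+
+   Putting steps 4, 5 and 6 together, the ramster-related tail of
+   /etc/rc.local on the overcommitted node 0 of this two-node example
+   might look like (node numbers as in the sample ramster.conf):
+
+       echo 0 > /sys/kernel/mm/ramster/manual_node_up
+       echo 1 > /sys/kernel/mm/ramster/manual_node_up
+       echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+       echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
+       echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable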
+
+7) Note that most testing has been done with both/all machines booted
+   roughly simultaneously to avoid cluster timeouts.  Ideally, you should
+   do this too unless you are trying to break ramster rather than just
+   use it. ;-)
+
+D. TESTING RAMSTER
+
+1) Note that ramster has no value unless pages get "remotified".  For
+   swap/frontswap/persistent pages, this doesn't happen unless/until
+   the workload would cause swapping to occur, at which point pages
+   are put into frontswap/zcache, and the remotification thread starts
+   working.  To get to the point where the system swaps, you either
+   need a workload for which the working set exceeds the RAM in the
+   system; or you need to somehow reduce the amount of RAM one of
+   the system sees.  This latter is easy when testing in a VM, but
+   harder on physical systems.  In some cases, "mem=xxxM" on the
+   kernel command line restricts memory, but for some values of xxx
+   the kernel may fail to boot.  One may also try creating a fixed
+   RAMdisk, doing nothing with it, but ensuring that it eats up a fixed
+   amount of RAM.
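+
+   One way to set up such a RAMdisk (a sketch; rd_size is in KiB, so the
+   value below carves out a 2GB ramdisk, and the size is arbitrary):
+
+       # modprobe brd rd_nr=1 rd_size=2097152
+       # dd if=/dev/zero of=/dev/ram0 bs=1M count=2048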
+
+2) To see if ramster is working, on the "remote node", node 1, try:
+
+       # grep . /sys/kernel/debug/ramster/foreign_*
+        # # note, that is space-dot-space between grep and the pathname
+
+   to monitor the number (and max) of ephemeral and persistent pages
+   that ramster has sent.  If these stay at zero, ramster is not working
+   either because the workload on the local node (node 0) isn't creating
+   enough memory pressure or because "remotifying" isn't working.  On the
+   local system, node 0, you can watch lots of useful information also.
+   Try:
+
+       grep . /sys/kernel/debug/zcache/*pageframes* \
+               /sys/kernel/debug/zcache/*zbytes* \
+               /sys/kernel/debug/zcache/*zpages* \
+               /sys/kernel/debug/ramster/*remote*
+
+   Of particular note are the remote_*_pages_succ_get counters.  These
+   show how many disk reads and/or disk writes have been avoided on the
+   overcommitted local system by storing pages remotely using ramster.
+
+   At the risk of information overload, you can also grep:
+
+        /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/*
+
+   These show, for example, how many disk reads and/or disk writes have
+   been avoided by using zcache to optimize RAM on the local system.
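+
+   To watch any of these counters change while a workload runs, standard
+   watch(1) is convenient (just a monitoring convenience, not part of
+   ramster itself):
+
+       # watch -n 5 'grep . /sys/kernel/debug/ramster/*remote*'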
+
+
+AUTOMATIC SWAP REPATRIATION
+
+You may notice that while the systems are idle, the foreign persistent
+page count on the remote machine slowly decreases.  This is because
+ramster implements "frontswap selfshrinking":  When possible, swap
+pages that have been remotified are slowly repatriated to the local
+machine.  This is so that local RAM can be used when possible and
+so that, in case of remote machine crash, the probability of loss
+of data is reduced.
+
+REBOOTING / POWEROFF
+
+If a system is shut down while some of its swap pages still reside
+on a remote system, the system may lock up during the shutdown
+sequence.  This will occur if the network is shut down before the
+   swap mechanism is shut down, which is the default ordering on many
+distros.  To avoid this annoying problem, simply shut off the swap
+subsystem before starting the shutdown sequence, e.g.:
+
+       # swapoff -a
+       # reboot
+
+Ideally, this swapoff-before-ifdown ordering should be enforced permanently
+using shutdown scripts.
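+
+One minimal way to do that on an EL6-style sysvinit system (a sketch; the
+script name and the K00/S99 ordering prefixes are placeholders chosen so the
+swapoff runs before the network is taken down) is a tiny init script,
+e.g. /etc/init.d/ramster-swapoff:
+
+       #!/bin/sh
+       # ramster-swapoff: turn off swap early during shutdown so swap
+       # pages that ramster has remotified can be pulled back while the
+       # cluster network is still up.
+       case "$1" in
+           start) touch /var/lock/subsys/ramster-swapoff ;;
+           stop)  swapoff -a
+                  rm -f /var/lock/subsys/ramster-swapoff ;;
+       esac
+
+The subsys lock file is what makes the EL6 shutdown sequence run the stop
+action.  Then link it into the runlevels:
+
+       # chmod +x /etc/init.d/ramster-swapoff
+       # ln -s ../init.d/ramster-swapoff /etc/rc3.d/S99ramster-swapoff
+       # ln -s ../init.d/ramster-swapoff /etc/rc0.d/K00ramster-swapoff
+       # ln -s ../init.d/ramster-swapoff /etc/rc6.d/K00ramster-swapoff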
+
+KNOWN PROBLEMS
+
+1) You may periodically see messages such as:
+
+    ramster_r2net, message length problem
+
+   This is harmless but indicates that a node is sending messages
+   containing compressed pages that exceed the maximum for zcache
+   (PAGE_SIZE*15/16).  The sender side needs to be fixed.
+
+2) If you see a "No longer connected to node..." message or a "No connection
+   established with node X after N seconds", it is possible you may
+   be in an unrecoverable state.  If you are certain all of the
+   appropriate cluster configuration steps described above have been
+   performed, try rebooting the two servers concurrently to see if
+   the cluster starts.
+
+   Note that "Connection to node... shutdown, state 7" is an intermediate
+   connection state.  As long as you later see "Accepted connection", the
+   intermediate states are harmless.
+
+3) There are known issues in counting certain values.  As a result
+   you may see periodic warnings from the kernel.  Almost always you
+   will see "ramster: bad accounting for XXX".  There are also "WARN_ONCE"
+   messages.  If you see kernel warnings with a tombstone, please report
+   them.  They are harmless but reflect bugs that need to be eventually fixed.
+
+ADVANCED RAMSTER TOPOLOGIES
+
+The kernel code for ramster can support up to eight nodes in a cluster,
+but no testing has been done with more than three nodes.
+
+In the example described above, the "remote" node serves as a RAM
+overflow for the "local" node.  This can be made symmetric by appropriate
+settings of the sysfs remote_target_nodenum file.  For example, by setting:
+
+       # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 0, and
+
+       # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 1, each node can serve as a RAM overflow for the other.
+
+For more than two nodes, a "RAM server" can be configured.  For a
+three node system, set:
+
+       # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 1, and
+
+       # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 2.  Then node 0 is a RAM server for node 1 and node 2.
+
+In this implementation of ramster, any remote node is potentially a single
+point of failure (SPOF).  Though the probability of failure is reduced
+by automatic swap repatriation (see above), a proposed future enhancement
+to ramster improves high-availability for the cluster by sending a copy
+of each page of data to two other nodes.  Patches welcome!
index b18b887db79f70811ee43d0423a2f87d1b4583c7..a937ce1fa27afb6b8895a72f33d19e3ae2fee878 100644 (file)
@@ -66,8 +66,6 @@ static int ramster_remote_target_nodenum __read_mostly = -1;
 
 /* Used by this code. */
 long ramster_flnodes;
-ssize_t ramster_foreign_eph_pages;
-ssize_t ramster_foreign_pers_pages;
 /* FIXME frontswap selfshrinking knobs in debugfs? */
 
 static LIST_HEAD(ramster_rem_op_list);
@@ -399,14 +397,18 @@ void ramster_count_foreign_pages(bool eph, int count)
                        inc_ramster_foreign_eph_pages();
                } else {
                        dec_ramster_foreign_eph_pages();
+#ifdef CONFIG_RAMSTER_DEBUG
                        WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
+#endif
                }
        } else {
                if (count > 0) {
                        inc_ramster_foreign_pers_pages();
                } else {
                        dec_ramster_foreign_pers_pages();
+#ifdef CONFIG_RAMSTER_DEBUG
                        WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
+#endif
                }
        }
 }
index 522cb8e5514269b12eaee3a8875ba6f241c64dad..dcceed29d31ae415818d1fceecc6518cbc017845 100644 (file)
@@ -1922,15 +1922,15 @@ out:
 
 #ifdef CONFIG_ZCACHE_MODULE
 #ifdef CONFIG_RAMSTER
-module_param(ramster_enabled, int, S_IRUGO);
+module_param(ramster_enabled, bool, S_IRUGO);
 module_param(disable_frontswap_selfshrink, int, S_IRUGO);
 #endif
-module_param(disable_cleancache, int, S_IRUGO);
-module_param(disable_frontswap, int, S_IRUGO);
+module_param(disable_cleancache, bool, S_IRUGO);
+module_param(disable_frontswap, bool, S_IRUGO);
 #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
 module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
 #endif
-module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO);
+module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO);
 module_param(zcache_comp_name, charp, S_IRUGO);
 module_init(zcache_init);
 MODULE_LICENSE("GPL");
index ffbc6a94be522abd396a50b90ccb47e9c202a76f..d7705e5824fb2d39205d55846d560df2a8a03270 100644 (file)
@@ -651,7 +651,7 @@ static int iscsit_add_reject(
        cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                return -1;
        }
 
@@ -697,7 +697,7 @@ int iscsit_add_reject_from_cmd(
        cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                return -1;
        }
 
@@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg(
 
 static void iscsit_do_crypto_hash_buf(
        struct hash_desc *hash,
-       unsigned char *buf,
+       const void *buf,
        u32 payload_length,
        u32 padding,
        u8 *pad_bytes,
@@ -1743,7 +1743,7 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        return 0;
 out:
        if (cmd)
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
 ping_out:
        kfree(ping_data);
        return ret;
@@ -2251,7 +2251,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
                pr_err("Received logout request on connection that"
                        " is not in logged in state, ignoring request.\n");
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                return 0;
        }
 
@@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->tx_size += ISCSI_CRC_LEN;
                pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                tx_size += ISCSI_CRC_LEN;
                pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3040,9 +3035,8 @@ static int iscsit_send_r2t(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3557,11 +3548,11 @@ static int iscsit_send_reject(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
 {
-       u32 iov_count = 0, tx_size = 0;
-       struct iscsi_reject *hdr;
+       struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
        struct kvec *iov;
+       u32 iov_count = 0, tx_size;
 
-       iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]);
+       iscsit_build_reject(cmd, conn, hdr);
 
        iov = &cmd->iov_misc[0];
        iov[iov_count].iov_base = cmd->pdu;
@@ -3574,9 +3565,8 @@ static int iscsit_send_reject(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3585,9 +3575,8 @@ static int iscsit_send_reject(
        }
 
        if (conn->conn_ops->DataDigest) {
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)&cmd->data_crc);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
 
                iov[iov_count].iov_base = &cmd->data_crc;
                iov[iov_count++].iov_len  = ISCSI_CRC_LEN;
@@ -3676,7 +3665,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
                list_del(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                break;
        case ISTATE_SEND_NOPIN_WANT_RESPONSE:
                iscsit_mod_nopin_response_timer(conn);
@@ -4133,7 +4122,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 
                iscsit_increment_maxcmdsn(cmd, sess);
 
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, true);
 
                spin_lock_bh(&conn->cmd_lock);
        }
index 7816af6cdd1209f7e09ac31adc027ab80b3663de..40d9dbca987b25a811ca0fb75c9c0277f65933a8 100644 (file)
@@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn(
                /*
                 * CmdSN is greater than the tail of the list.
                 */
-               if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+               if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
                        list_add_tail(&ooo_cmdsn->ooo_list,
                                        &sess->sess_ooo_cmdsn_list);
                else {
@@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn(
                         */
                        list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
                                                ooo_list) {
-                               if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+                               if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
                                        continue;
 
+                               /* Insert before this entry */
                                list_add(&ooo_cmdsn->ooo_list,
-                                       &ooo_tmp->ooo_list);
+                                       ooo_tmp->ooo_list.prev);
                                break;
                        }
                }
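
The comparison fix above matters once CmdSN wraps: iscsi_sna_lt() orders 32-bit serial numbers modulo 2^32, so a CmdSN that has wrapped past zero still sorts after one just below UINT32_MAX, while a plain '<' does not. A minimal userspace sketch of that kind of comparison (sna32_lt() here is an illustration, not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

/* Serial-number "less than": decided by the signed difference, so ordering
 * survives 32-bit wraparound. Illustrative only; relies on the usual
 * two's-complement behaviour of the int32_t cast. */
static int sna32_lt(uint32_t a, uint32_t b)
{
	return a != b && (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t tail = 0xFFFFFFFEu, new_sn = 2u;  /* new_sn wrapped past zero */

	printf("plain <  : %d\n", tail < new_sn);           /* 0: wrongly treats new_sn as not later */
	printf("sna32_lt : %d\n", sna32_lt(tail, new_sn));  /* 1: new_sn ordered after tail */
	return 0;
}
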
index ba6091bf93fcde3c1463d6ba54a8ae07f812ddf0..45a5afd5ea13b2eda8bfb11de4f6f238e3996219 100644 (file)
@@ -143,7 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
                        list_del(&cmd->i_conn_node);
                        cmd->conn = NULL;
                        spin_unlock(&cr->conn_recovery_cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock(&cr->conn_recovery_cmd_lock);
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -165,7 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
                        list_del(&cmd->i_conn_node);
                        cmd->conn = NULL;
                        spin_unlock(&cr->conn_recovery_cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock(&cr->conn_recovery_cmd_lock);
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -248,7 +248,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
                iscsit_remove_cmd_from_connection_recovery(cmd, sess);
 
                spin_unlock(&cr->conn_recovery_cmd_lock);
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, true);
                spin_lock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -302,7 +302,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
                list_del(&cmd->i_conn_node);
 
                spin_unlock_bh(&conn->cmd_lock);
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, true);
                spin_lock_bh(&conn->cmd_lock);
        }
        spin_unlock_bh(&conn->cmd_lock);
@@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock_bh(&conn->cmd_lock);
                        continue;
                }
@@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock_bh(&conn->cmd_lock);
                        continue;
                }
index f690be9e5293f0872667188a84df5a7e1998032e..e38222191a33b7c19ef71e827a6d995f31dc8271 100644 (file)
@@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
        /*
         * Extra parameters for ISER from RFC-5046
         */
-       param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS,
+       param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
                        PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
                        TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
        if (!param)
@@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate(
                        SET_PSTATE_NEGOTIATE(param);
                } else if (!strcmp(param->name, OFMARKINT)) {
                        SET_PSTATE_NEGOTIATE(param);
-               } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+               } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                        if (iser == true)
                                SET_PSTATE_NEGOTIATE(param);
                } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
@@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery(
                        param->state &= ~PSTATE_NEGOTIATE;
                else if (!strcmp(param->name, OFMARKINT))
                        param->state &= ~PSTATE_NEGOTIATE;
-               else if (!strcmp(param->name, RDMAEXTENTIONS))
+               else if (!strcmp(param->name, RDMAEXTENSIONS))
                        param->state &= ~PSTATE_NEGOTIATE;
                else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
                        param->state &= ~PSTATE_NEGOTIATE;
@@ -758,9 +758,9 @@ static int iscsi_add_notunderstood_response(
        }
        INIT_LIST_HEAD(&extra_response->er_list);
 
-       strncpy(extra_response->key, key, strlen(key) + 1);
-       strncpy(extra_response->value, NOTUNDERSTOOD,
-                       strlen(NOTUNDERSTOOD) + 1);
+       strlcpy(extra_response->key, key, sizeof(extra_response->key));
+       strlcpy(extra_response->value, NOTUNDERSTOOD,
+               sizeof(extra_response->value));
 
        list_add_tail(&extra_response->er_list,
                        &param_list->extra_response_list);
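
Bounding the copy by the destination is the point of the change above: strncpy() with strlen(key) + 1 trusts the source length, so an oversized key could overrun extra_response->key, while strlcpy() truncates to the buffer and always NUL-terminates. A rough userspace sketch of such a bounded copy (bounded_copy() stands in for the real helper):

#include <stdio.h>
#include <string.h>

/* Copy at most size - 1 bytes and always NUL-terminate; returns the length
 * of the source so callers can detect truncation. Illustration only. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char key[8];

	/* strncpy(key, src, strlen(src) + 1) would write 25 bytes here. */
	bounded_copy(key, "MaxRecvDataSegmentLength", sizeof(key));
	printf("%s\n", key);  /* prints "MaxRecv" */
	return 0;
}
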
@@ -1629,8 +1629,6 @@ int iscsi_decode_text_input(
 
                if (phase & PHASE_SECURITY) {
                        if (iscsi_check_for_auth_key(key) > 0) {
-                               char *tmpptr = key + strlen(key);
-                               *tmpptr = '=';
                                kfree(tmpbuf);
                                return 1;
                        }
@@ -1977,7 +1975,7 @@ void iscsi_set_session_parameters(
                        ops->SessionType = !strcmp(param->value, DISCOVERY);
                        pr_debug("SessionType:                  %s\n",
                                param->value);
-               } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+               } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                        ops->RDMAExtensions = !strcmp(param->value, YES);
                        pr_debug("RDMAExtensions:               %s\n",
                                param->value);
index f31b9c4b83f26ca6db4cd4fa5fd357ca414c0bfa..a47046a752aac7ccdabbdf4e41c936d53a13569b 100644 (file)
@@ -1,8 +1,10 @@
 #ifndef ISCSI_PARAMETERS_H
 #define ISCSI_PARAMETERS_H
 
+#include <scsi/iscsi_proto.h>
+
 struct iscsi_extra_response {
-       char key[64];
+       char key[KEY_MAXLEN];
        char value[32];
        struct list_head er_list;
 } ____cacheline_aligned;
@@ -91,7 +93,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Parameter names of iSCSI Extentions for RDMA (iSER).  See RFC-5046
  */
-#define RDMAEXTENTIONS                 "RDMAExtensions"
+#define RDMAEXTENSIONS                 "RDMAExtensions"
 #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
 #define TARGETRECVDATASEGMENTLENGTH    "TargetRecvDataSegmentLength"
 
@@ -142,7 +144,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Initial values for iSER parameters following RFC-5046 Section 6
  */
-#define INITIAL_RDMAEXTENTIONS                 NO
+#define INITIAL_RDMAEXTENSIONS                 NO
 #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
 #define INITIAL_TARGETRECVDATASEGMENTLENGTH    "8192"
 
index 2cc6c9a3ffb8417b4322dcb5688f01b40d47db63..08a3bacef0c599ee72ba13e04d28a0c765783a97 100644 (file)
@@ -676,40 +676,56 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
 
 void iscsit_release_cmd(struct iscsi_cmd *cmd)
 {
-       struct iscsi_conn *conn = cmd->conn;
-
-       iscsit_free_r2ts_from_list(cmd);
-       iscsit_free_all_datain_reqs(cmd);
-
        kfree(cmd->buf_ptr);
        kfree(cmd->pdu_list);
        kfree(cmd->seq_list);
        kfree(cmd->tmr_req);
        kfree(cmd->iov_data);
 
-       if (conn) {
+       kmem_cache_free(lio_cmd_cache, cmd);
+}
+
+static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+                             bool check_queues)
+{
+       struct iscsi_conn *conn = cmd->conn;
+
+       if (scsi_cmd) {
+               if (cmd->data_direction == DMA_TO_DEVICE) {
+                       iscsit_stop_dataout_timer(cmd);
+                       iscsit_free_r2ts_from_list(cmd);
+               }
+               if (cmd->data_direction == DMA_FROM_DEVICE)
+                       iscsit_free_all_datain_reqs(cmd);
+       }
+
+       if (conn && check_queues) {
                iscsit_remove_cmd_from_immediate_queue(cmd, conn);
                iscsit_remove_cmd_from_response_queue(cmd, conn);
        }
-
-       kmem_cache_free(lio_cmd_cache, cmd);
 }
 
-void iscsit_free_cmd(struct iscsi_cmd *cmd)
+void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
+       struct se_cmd *se_cmd = NULL;
+       int rc;
        /*
         * Determine if a struct se_cmd is associated with
         * this struct iscsi_cmd.
         */
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
-               if (cmd->data_direction == DMA_TO_DEVICE)
-                       iscsit_stop_dataout_timer(cmd);
+               se_cmd = &cmd->se_cmd;
+               __iscsit_free_cmd(cmd, true, shutdown);
                /*
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
-               transport_generic_free_cmd(&cmd->se_cmd, 1);
+               rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+               if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+                       __iscsit_free_cmd(cmd, true, shutdown);
+                       target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+               }
                break;
        case ISCSI_OP_REJECT:
                /*
@@ -718,11 +734,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
-                       transport_generic_free_cmd(&cmd->se_cmd, 1);
+                       se_cmd = &cmd->se_cmd;
+                       __iscsit_free_cmd(cmd, true, shutdown);
+
+                       rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+                       if (!rc && shutdown && se_cmd->se_sess) {
+                               __iscsit_free_cmd(cmd, true, shutdown);
+                               target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                       }
                        break;
                }
                /* Fall-through */
        default:
+               __iscsit_free_cmd(cmd, false, shutdown);
                cmd->release_cmd(cmd);
                break;
        }
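
The shutdown argument added above covers the teardown case where transport_generic_free_cmd() returns 0 because the command is still referenced elsewhere: the caller then performs the extra __iscsit_free_cmd() and target_put_sess_cmd() itself so the command is not leaked when the connection goes away. A toy sketch of that conditional extra put (the counts and names are made up):

#include <stdio.h>

static int refs = 2;  /* pretend a sess_cmd reference is still outstanding */

static int generic_free(void)   /* returns nonzero only if it released */
{
	return --refs == 0;
}

static void put_sess_cmd(void)
{
	if (--refs == 0)
		printf("command released by the shutdown path\n");
}

int main(void)
{
	int shutdown = 1;

	if (!generic_free() && shutdown)
		put_sess_cmd();
	return 0;
}
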
index 4f8e01a47081ff72328130a62278d421ec73ed08..a4422659d04944f58c85670bfd08c646c1bcf5ff 100644 (file)
@@ -29,7 +29,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
 extern void iscsit_inc_session_usage_count(struct iscsi_session *);
index 43b7ac6c5b1c80e5132bb793d4a447abf11f443c..4a8bd36d39588b24d8de02f495e4ca166c022795 100644 (file)
@@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
        .store  = target_core_store_dev_udev_path,
 };
 
+static ssize_t target_core_show_dev_enable(void *p, char *page)
+{
+       struct se_device *dev = p;
+
+       return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
 static ssize_t target_core_store_dev_enable(
        void *p,
        const char *page,
@@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable(
 static struct target_core_configfs_attribute target_core_attr_dev_enable = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "enable",
-                   .ca_mode = S_IWUSR },
-       .show   = NULL,
+                   .ca_mode =  S_IRUGO | S_IWUSR },
+       .show   = target_core_show_dev_enable,
        .store  = target_core_store_dev_enable,
 };
 
index 2e4d655471bc0f994c5d24df4b0c43f61fefac34..4630481b60438db290caf48ed8fc60c7f224938c 100644 (file)
@@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                struct se_dev_entry *deve = se_cmd->se_deve;
 
                deve->total_cmds++;
-               deve->total_bytes += se_cmd->data_length;
 
                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
@@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        deve->read_bytes += se_cmd->data_length;
 
-               deve->deve_cmds++;
-
                se_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
@@ -275,17 +272,6 @@ int core_free_device_list_for_node(
        return 0;
 }
 
-void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
-{
-       struct se_dev_entry *deve;
-       unsigned long flags;
-
-       spin_lock_irqsave(&se_nacl->device_list_lock, flags);
-       deve = se_nacl->device_list[se_cmd->orig_fe_lun];
-       deve->deve_cmds--;
-       spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
-}
-
 void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
index 58ed683e04aefc942b7456fae4b9c6ba10f05a00..b11890d85120977d6e380db19192143ddab6251c 100644 (file)
@@ -153,10 +153,7 @@ static int fd_configure_device(struct se_device *dev)
                struct request_queue *q = bdev_get_queue(inode->i_bdev);
                unsigned long long dev_size;
 
-               dev->dev_attrib.hw_block_size =
-                       bdev_logical_block_size(inode->i_bdev);
-               dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
-
+               fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
                /*
                 * Determine the number of bytes from i_size_read() minus
                 * one (1) logical sector from underlying struct block_device
@@ -203,9 +200,7 @@ static int fd_configure_device(struct se_device *dev)
                        goto fail;
                }
 
-               dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
-               dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
-
+               fd_dev->fd_block_size = FD_BLOCKSIZE;
                /*
                 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
                 */
@@ -224,8 +219,8 @@ static int fd_configure_device(struct se_device *dev)
                dev->dev_attrib.max_write_same_len = 0x1000;
        }
 
-       fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
-
+       dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+       dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -699,11 +694,12 @@ static sector_t fd_get_blocks(struct se_device *dev)
         * to handle underlying block_device resize operations.
         */
        if (S_ISBLK(i->i_mode))
-               dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+               dev_size = i_size_read(i);
        else
                dev_size = fd_dev->fd_dev_size;
 
-       return div_u64(dev_size, dev->dev_attrib.block_size);
+       return div_u64(dev_size - dev->dev_attrib.block_size,
+                      dev->dev_attrib.block_size);
 }
 
 static struct sbc_ops fd_sbc_ops = {
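
With the change above, both branches of fd_get_blocks() subtract one logical block before dividing, which looks intended to report the last addressable LBA rather than the raw block count (the block-device branch already behaved that way via the old fd_block_size subtraction). A worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 1 MiB backing store with 512-byte logical blocks. */
	uint64_t dev_size = 1024 * 1024;
	uint32_t block_size = 512;

	/* (1048576 - 512) / 512 = 2047, the highest LBA of 2048 blocks. */
	printf("%llu\n", (unsigned long long)((dev_size - block_size) / block_size));
	return 0;
}
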
index 07f5f94634bb63ea217250d90e37d7030da93136..aa1620abec6dc0b1ccb5a78305ca2ed41800ac5d 100644 (file)
@@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd)
                                rw = WRITE_FUA;
                        else if (!(q->flush_flags & REQ_FLUSH))
                                rw = WRITE_FUA;
+                       else
+                               rw = WRITE;
                } else {
                        rw = WRITE;
                }
index 853bab60e362ebd8371f50d7a52504ec25fb9fa4..18d49df4d0ac59435c9b50ef87fb61723415c1c5 100644 (file)
@@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp;
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 int    core_free_device_list_for_node(struct se_node_acl *,
                struct se_portal_group *);
-void   core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
 void   core_update_device_list_access(u32, u32, struct se_node_acl *);
 int    core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
                u32, u32, struct se_node_acl *, struct se_portal_group *);
index e0b3c379aa148c75d4364dccbb47720c0db1b731..0921a64b555028997691fb28ad7ab84294b10a0d 100644 (file)
@@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd)
        u32 src_len;
        u64 tmp;
 
+       if (dev->rd_flags & RDF_NULLIO) {
+               target_complete_cmd(cmd, SAM_STAT_GOOD);
+               return 0;
+       }
+
        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
@@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd)
 }
 
 enum {
-       Opt_rd_pages, Opt_err
+       Opt_rd_pages, Opt_rd_nullio, Opt_err
 };
 
 static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
+       {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
 };
 
@@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
+               case Opt_rd_nullio:
+                       match_int(args, &arg);
+                       if (arg != 1)
+                               break;
+
+                       pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+                       rd_dev->rd_flags |= RDF_NULLIO;
+                       break;
                default:
                        break;
                }
@@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
-                       "  SG_table_count: %u\n", rd_dev->rd_page_count,
-                       PAGE_SIZE, rd_dev->sg_table_count);
+                       "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
+                       PAGE_SIZE, rd_dev->sg_table_count,
+                       !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
 }
 
index 933b38b6e56373f282c96a2d3af5677dd6ba8eca..1789d1e14395e0c631d56485972a5ad65b5799a1 100644 (file)
@@ -22,6 +22,7 @@ struct rd_dev_sg_table {
 } ____cacheline_aligned;
 
 #define RDF_HAS_PAGE_COUNT     0x01
+#define RDF_NULLIO             0x02
 
 struct rd_dev {
        struct se_device dev;
index f8388b4024aafa56e647c3e90bc99895df6c5e2c..21e315874a5472503dfe1ba010dea1236993b553 100644 (file)
@@ -65,7 +65,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
 static int transport_generic_get_mem(struct se_cmd *cmd);
-static void transport_put_cmd(struct se_cmd *cmd);
+static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
@@ -221,6 +221,7 @@ struct se_session *transport_init_session(void)
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+       INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
        kref_init(&se_sess->sess_kref);
 
@@ -1943,7 +1944,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
  * This routine unconditionally frees a command, and reference counting
  * or list removal must be done in the caller.
  */
-static void transport_release_cmd(struct se_cmd *cmd)
+static int transport_release_cmd(struct se_cmd *cmd)
 {
        BUG_ON(!cmd->se_tfo);
 
@@ -1955,11 +1956,11 @@ static void transport_release_cmd(struct se_cmd *cmd)
         * If this cmd has been setup with target_get_sess_cmd(), drop
         * the kref and call ->release_cmd() in kref callback.
         */
-        if (cmd->check_release != 0) {
-               target_put_sess_cmd(cmd->se_sess, cmd);
-               return;
-       }
+        if (cmd->check_release != 0)
+               return target_put_sess_cmd(cmd->se_sess, cmd);
+
        cmd->se_tfo->release_cmd(cmd);
+       return 1;
 }
 
 /**
@@ -1968,7 +1969,7 @@ static void transport_release_cmd(struct se_cmd *cmd)
  *
  * This routine releases our reference to the command and frees it if possible.
  */
-static void transport_put_cmd(struct se_cmd *cmd)
+static int transport_put_cmd(struct se_cmd *cmd)
 {
        unsigned long flags;
 
@@ -1976,7 +1977,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
        if (atomic_read(&cmd->t_fe_count) &&
            !atomic_dec_and_test(&cmd->t_fe_count)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return;
+               return 0;
        }
 
        if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -1986,8 +1987,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        transport_free_pages(cmd);
-       transport_release_cmd(cmd);
-       return;
+       return transport_release_cmd(cmd);
 }
 
 void *transport_kmap_data_sg(struct se_cmd *cmd)
@@ -2152,24 +2152,25 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        }
 }
 
-void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
+       int ret = 0;
+
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
                if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                         transport_wait_for_tasks(cmd);
 
-               transport_release_cmd(cmd);
+               ret = transport_release_cmd(cmd);
        } else {
                if (wait_for_tasks)
                        transport_wait_for_tasks(cmd);
 
-               core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
-
                if (cmd->se_lun)
                        transport_lun_remove_cmd(cmd);
 
-               transport_put_cmd(cmd);
+               ret = transport_put_cmd(cmd);
        }
+       return ret;
 }
 EXPORT_SYMBOL(transport_generic_free_cmd);
 
@@ -2213,21 +2214,19 @@ static void target_release_cmd_kref(struct kref *kref)
 {
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
-       unsigned long flags;
 
-       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               spin_unlock(&se_sess->sess_cmd_lock);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }
        if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               spin_unlock(&se_sess->sess_cmd_lock);
                complete(&se_cmd->cmd_wait_comp);
                return;
        }
        list_del(&se_cmd->se_cmd_list);
-       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+       spin_unlock(&se_sess->sess_cmd_lock);
 
        se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2238,7 +2237,8 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
-       return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+       return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+                       &se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
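
Switching to kref_put_spinlock_irqsave() means the final reference is dropped with sess_cmd_lock already held, so target_release_cmd_kref() now runs under the lock (and only unlocks it) instead of acquiring it after the count has already reached zero, closing the window against a concurrent list walk. A userspace sketch of the general idea (names are illustrative; this is not the kref API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	void (*release)(struct obj *);  /* runs with list_lock held */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void put_under_lock(struct obj *o)
{
	pthread_mutex_lock(&list_lock);
	if (--o->refcount == 0) {
		o->release(o);          /* unlink and free while still locked */
		pthread_mutex_unlock(&list_lock);
		return;
	}
	pthread_mutex_unlock(&list_lock);
}

static void release_obj(struct obj *o)
{
	printf("releasing %p with the lock held\n", (void *)o);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	o->release = release_obj;
	put_under_lock(o);
	return 0;
}
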
 
@@ -2253,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
        unsigned long flags;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
-       WARN_ON(se_sess->sess_tearing_down);
+       if (se_sess->sess_tearing_down) {
+               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               return;
+       }
        se_sess->sess_tearing_down = 1;
+       list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
 
-       list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
+       list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
                se_cmd->cmd_wait_set = 1;
 
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2266,44 +2269,32 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
 
 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
  * @se_sess:    session to wait for active I/O
- * @wait_for_tasks:    Make extra transport_wait_for_tasks call
  */
-void target_wait_for_sess_cmds(
-       struct se_session *se_sess,
-       int wait_for_tasks)
+void target_wait_for_sess_cmds(struct se_session *se_sess)
 {
        struct se_cmd *se_cmd, *tmp_cmd;
-       bool rc = false;
+       unsigned long flags;
 
        list_for_each_entry_safe(se_cmd, tmp_cmd,
-                               &se_sess->sess_cmd_list, se_cmd_list) {
+                               &se_sess->sess_wait_list, se_cmd_list) {
                list_del(&se_cmd->se_cmd_list);
 
                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
                        " %d\n", se_cmd, se_cmd->t_state,
                        se_cmd->se_tfo->get_cmd_state(se_cmd));
 
-               if (wait_for_tasks) {
-                       pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
-                               " fabric state: %d\n", se_cmd, se_cmd->t_state,
-                               se_cmd->se_tfo->get_cmd_state(se_cmd));
-
-                       rc = transport_wait_for_tasks(se_cmd);
-
-                       pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
-                               " fabric state: %d\n", se_cmd, se_cmd->t_state,
-                               se_cmd->se_tfo->get_cmd_state(se_cmd));
-               }
-
-               if (!rc) {
-                       wait_for_completion(&se_cmd->cmd_wait_comp);
-                       pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
-                               " fabric state: %d\n", se_cmd, se_cmd->t_state,
-                               se_cmd->se_tfo->get_cmd_state(se_cmd));
-               }
+               wait_for_completion(&se_cmd->cmd_wait_comp);
+               pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+                       " fabric state: %d\n", se_cmd, se_cmd->t_state,
+                       se_cmd->se_tfo->get_cmd_state(se_cmd));
 
                se_cmd->se_tfo->release_cmd(se_cmd);
        }
+
+       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+       WARN_ON(!list_empty(&se_sess->sess_cmd_list));
+       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
index 5b4d75fd7b49f3f857e39dacc04488977c3d448b..54ffd64ca3f7560a3b9563bf4087c949a32cf2c6 100644 (file)
@@ -169,21 +169,11 @@ static int armada_thermal_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
-
        priv->sensor = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->sensor))
                return PTR_ERR(priv->sensor);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
-
        priv->control = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->control))
                return PTR_ERR(priv->control);
index 4b15a5f270dc71a021bde1b6e96e8656720d45bc..a088d1365ca5ef287e46db053ba3625b8c536473 100644 (file)
@@ -149,10 +149,6 @@ static int dove_thermal_probe(struct platform_device *pdev)
                return PTR_ERR(priv->sensor);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
        priv->control = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->control))
                return PTR_ERR(priv->control);
index d20ce9e614034ba4077d797b5584cfad4d994727..788b1ddcac6caa11709b0c383bc3d60295bdb8be 100644 (file)
@@ -925,11 +925,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        INIT_WORK(&data->irq_work, exynos_tmu_work);
 
        data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!data->mem) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENOENT;
-       }
-
        data->base = devm_ioremap_resource(&pdev->dev, data->mem);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
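
The error-handling deletions in these thermal probes lean on devm_ioremap_resource() validating its resource argument and returning an error pointer, which the existing IS_ERR()/PTR_ERR() handling already covers, so the separate NULL checks were redundant. A rough userspace sketch of the error-pointer convention involved (the constants and helpers here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)    { return (void *)err; }
static int is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

int main(void)
{
	/* A helper that validates its own input can hand back an encoded
	 * errno instead of forcing every caller to pre-check for NULL. */
	void *base = err_ptr(-22 /* EINVAL: missing or bad resource */);

	if (is_err(base))
		printf("probe fails with %ld\n", (long)(intptr_t)base);
	else
		printf("mapping at %p\n", base);
	return 0;
}
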
index 6d0c27cd03da2676b91bfef307482a83be765697..9bffcec5ad82a4bcf0f05e3a89c73cf0b62433dc 100644 (file)
@@ -859,6 +859,7 @@ error:
  */
 static void __exit ehv_bc_exit(void)
 {
+       platform_driver_unregister(&ehv_bc_tty_driver);
        tty_unregister_driver(ehv_bc_driver);
        put_tty_driver(ehv_bc_driver);
        kfree(bcs);
index 71d6eb2c93b1c6335ba123cc9ac3c17a2af0d805..4c4a236745692ab1ee2e674f5d7dbf0334002dae 100644 (file)
@@ -1618,8 +1618,12 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
                                if (ip->type == PORT_16550A)
                                        me->fifo[p] = 1;
 
-                               opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
-                               opmode &= OP_MODE_MASK;
+                               if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) {
+                                       opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
+                                       opmode &= OP_MODE_MASK;
+                               } else {
+                                       opmode = RS232_MODE;
+                               }
                                me->iftype[p] = opmode;
                                mutex_unlock(&port->mutex);
                        }
@@ -1676,6 +1680,9 @@ static int mxser_ioctl(struct tty_struct *tty,
                int shiftbit;
                unsigned char val, mask;
 
+               if (info->board->chip_flag != MOXA_MUST_MU860_HWID)
+                       return -EFAULT;
+
                p = tty->index % 4;
                if (cmd == MOXA_SET_OP_MODE) {
                        if (get_user(opmode, (int __user *) argp))
index d655416087b7099d7639923c7c0a478c72a5afbe..6c7fe90ad72d48d2834536331e6826ee2719f94f 100644 (file)
@@ -1573,6 +1573,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                        ldata->real_raw = 0;
        }
        n_tty_set_room(tty);
+       /*
+        * Fix tty hang when I_IXON(tty) is cleared, but the tty
+        * been stopped by STOP_CHAR(tty) before it.
+        * has been stopped by STOP_CHAR(tty) before it.
+       if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+               start_tty(tty);
+       }
+
        /* The termios change make the tty ready for I/O */
        wake_up_interruptible(&tty->write_wait);
        wake_up_interruptible(&tty->read_wait);
index 82d35c5a58fd0bc16649aacc3c004377ff23de54..354564ea47c504446ba5aeab1153d48edc4acc48 100644 (file)
@@ -150,12 +150,14 @@ static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = {
        AIOP_INTR_BIT_3
 };
 
+#ifdef CONFIG_PCI
 static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = {
        UPCI_AIOP_INTR_BIT_0,
        UPCI_AIOP_INTR_BIT_1,
        UPCI_AIOP_INTR_BIT_2,
        UPCI_AIOP_INTR_BIT_3
 };
+#endif
 
 static Byte_t RData[RDATASIZE] = {
        0x00, 0x09, 0xf6, 0x82,
@@ -227,7 +229,6 @@ static unsigned long nextLineNumber;
 static int __init init_ISA(int i);
 static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
 static void rp_flush_buffer(struct tty_struct *tty);
-static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model);
 static unsigned char GetLineNumber(int ctrl, int aiop, int ch);
 static unsigned char SetLineNumber(int ctrl, int aiop, int ch);
 static void rp_start(struct tty_struct *tty);
@@ -241,11 +242,6 @@ static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags);
 static void sModemReset(CONTROLLER_T * CtlP, int chan, int on);
 static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on);
 static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data);
-static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
-                             ByteIO_t * AiopIOList, int AiopIOListSize,
-                             WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
-                             int PeriodicOnly, int altChanRingIndicator,
-                             int UPCIRingInd);
 static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
                           ByteIO_t * AiopIOList, int AiopIOListSize,
                           int IRQNum, Byte_t Frequency, int PeriodicOnly);
@@ -1775,6 +1771,145 @@ static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
 };
 MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
 
+/*  Resets the speaker controller on RocketModem II and III devices */
+static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
+{
+       ByteIO_t addr;
+
+       /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
+       if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
+               addr = CtlP->AiopIO[0] + 0x4F;
+               sOutB(addr, 0);
+       }
+
+       /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
+       if ((model == MODEL_UPCI_RM3_8PORT)
+           || (model == MODEL_UPCI_RM3_4PORT)) {
+               addr = CtlP->AiopIO[0] + 0x88;
+               sOutB(addr, 0);
+       }
+}
+
+/***************************************************************************
+Function: sPCIInitController
+Purpose:  Initialization of controller global registers and controller
+          structure.
+Call:     sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
+                          IRQNum,Frequency,PeriodicOnly)
+          CONTROLLER_T *CtlP; Ptr to controller structure
+          int CtlNum; Controller number
+          ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
+             This list must be in the order the AIOPs will be found on the
+             controller.  Once an AIOP in the list is not found, it is
+             assumed that there are no more AIOPs on the controller.
+          int AiopIOListSize; Number of addresses in AiopIOList
+          int IRQNum; Interrupt Request number.  Can be any of the following:
+                         0: Disable global interrupts
+                         3: IRQ 3
+                         4: IRQ 4
+                         5: IRQ 5
+                         9: IRQ 9
+                         10: IRQ 10
+                         11: IRQ 11
+                         12: IRQ 12
+                         15: IRQ 15
+          Byte_t Frequency: A flag identifying the frequency
+                   of the periodic interrupt, can be any one of the following:
+                      FREQ_DIS - periodic interrupt disabled
+                      FREQ_137HZ - 137 Hertz
+                      FREQ_69HZ - 69 Hertz
+                      FREQ_34HZ - 34 Hertz
+                      FREQ_17HZ - 17 Hertz
+                      FREQ_9HZ - 9 Hertz
+                      FREQ_4HZ - 4 Hertz
+                   If IRQNum is set to 0 the Frequency parameter is
+                   overridden, it is forced to a value of FREQ_DIS.
+          int PeriodicOnly: 1 if all interrupts except the periodic
+                               interrupt are to be blocked.
+                            0 if both the periodic interrupt and
+                               other channel interrupts are allowed.
+                            If IRQNum is set to 0 the PeriodicOnly parameter is
+                               overridden, it is forced to a value of 0.
+Return:   int: Number of AIOPs on the controller, or CTLID_NULL if controller
+               initialization failed.
+
+Comments:
+          If periodic interrupts are to be disabled but AIOP interrupts
+          are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
+
+          If interrupts are to be completely disabled set IRQNum to 0.
+
+          Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
+          invalid combination.
+
+          This function performs initialization of global interrupt modes,
+          but it does not actually enable global interrupts.  To enable
+          and disable global interrupts use functions sEnGlobalInt() and
+          sDisGlobalInt().  Enabling of global interrupts is normally not
+          done until all other initializations are complete.
+
+          Even if interrupts are globally enabled, they must also be
+          individually enabled for each channel that is to generate
+          interrupts.
+
+Warnings: No range checking on any of the parameters is done.
+
+          No context switches are allowed while executing this function.
+
+          After this function all AIOPs on the controller are disabled,
+          they can be enabled with sEnAiop().
+*/
+static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
+                             ByteIO_t * AiopIOList, int AiopIOListSize,
+                             WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
+                             int PeriodicOnly, int altChanRingIndicator,
+                             int UPCIRingInd)
+{
+       int i;
+       ByteIO_t io;
+
+       CtlP->AltChanRingIndicator = altChanRingIndicator;
+       CtlP->UPCIRingInd = UPCIRingInd;
+       CtlP->CtlNum = CtlNum;
+       CtlP->CtlID = CTLID_0001;       /* controller release 1 */
+       CtlP->BusType = isPCI;  /* controller release 1 */
+
+       if (ConfigIO) {
+               CtlP->isUPCI = 1;
+               CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
+               CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
+               CtlP->AiopIntrBits = upci_aiop_intr_bits;
+       } else {
+               CtlP->isUPCI = 0;
+               CtlP->PCIIO =
+                   (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
+               CtlP->AiopIntrBits = aiop_intr_bits;
+       }
+
+       sPCIControllerEOI(CtlP);        /* clear EOI if warm init */
+       /* Init AIOPs */
+       CtlP->NumAiop = 0;
+       for (i = 0; i < AiopIOListSize; i++) {
+               io = AiopIOList[i];
+               CtlP->AiopIO[i] = (WordIO_t) io;
+               CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
+
+               CtlP->AiopID[i] = sReadAiopID(io);      /* read AIOP ID */
+               if (CtlP->AiopID[i] == AIOPID_NULL)     /* if AIOP does not exist */
+                       break;  /* done looking for AIOPs */
+
+               CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
+               sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE);    /* clock prescaler */
+               sOutB(io + _INDX_DATA, sClockPrescale);
+               CtlP->NumAiop++;        /* bump count of AIOPs */
+       }
+
+       if (CtlP->NumAiop == 0)
+               return (-1);
+       else
+               return (CtlP->NumAiop);
+}
+
 /*
  *  Called when a PCI card is found.  Retrieves and stores model information,
  *  init's aiopic and serial port hardware.
@@ -2519,147 +2654,6 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
                return (CtlP->NumAiop);
 }
 
-#ifdef CONFIG_PCI
-/***************************************************************************
-Function: sPCIInitController
-Purpose:  Initialization of controller global registers and controller
-          structure.
-Call:     sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
-                          IRQNum,Frequency,PeriodicOnly)
-          CONTROLLER_T *CtlP; Ptr to controller structure
-          int CtlNum; Controller number
-          ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
-             This list must be in the order the AIOPs will be found on the
-             controller.  Once an AIOP in the list is not found, it is
-             assumed that there are no more AIOPs on the controller.
-          int AiopIOListSize; Number of addresses in AiopIOList
-          int IRQNum; Interrupt Request number.  Can be any of the following:
-                         0: Disable global interrupts
-                         3: IRQ 3
-                         4: IRQ 4
-                         5: IRQ 5
-                         9: IRQ 9
-                         10: IRQ 10
-                         11: IRQ 11
-                         12: IRQ 12
-                         15: IRQ 15
-          Byte_t Frequency: A flag identifying the frequency
-                   of the periodic interrupt, can be any one of the following:
-                      FREQ_DIS - periodic interrupt disabled
-                      FREQ_137HZ - 137 Hertz
-                      FREQ_69HZ - 69 Hertz
-                      FREQ_34HZ - 34 Hertz
-                      FREQ_17HZ - 17 Hertz
-                      FREQ_9HZ - 9 Hertz
-                      FREQ_4HZ - 4 Hertz
-                   If IRQNum is set to 0 the Frequency parameter is
-                   overidden, it is forced to a value of FREQ_DIS.
-          int PeriodicOnly: 1 if all interrupts except the periodic
-                               interrupt are to be blocked.
-                            0 is both the periodic interrupt and
-                               other channel interrupts are allowed.
-                            If IRQNum is set to 0 the PeriodicOnly parameter is
-                               overidden, it is forced to a value of 0.
-Return:   int: Number of AIOPs on the controller, or CTLID_NULL if controller
-               initialization failed.
-
-Comments:
-          If periodic interrupts are to be disabled but AIOP interrupts
-          are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
-
-          If interrupts are to be completely disabled set IRQNum to 0.
-
-          Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
-          invalid combination.
-
-          This function performs initialization of global interrupt modes,
-          but it does not actually enable global interrupts.  To enable
-          and disable global interrupts use functions sEnGlobalInt() and
-          sDisGlobalInt().  Enabling of global interrupts is normally not
-          done until all other initializations are complete.
-
-          Even if interrupts are globally enabled, they must also be
-          individually enabled for each channel that is to generate
-          interrupts.
-
-Warnings: No range checking on any of the parameters is done.
-
-          No context switches are allowed while executing this function.
-
-          After this function all AIOPs on the controller are disabled,
-          they can be enabled with sEnAiop().
-*/
-static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
-                             ByteIO_t * AiopIOList, int AiopIOListSize,
-                             WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
-                             int PeriodicOnly, int altChanRingIndicator,
-                             int UPCIRingInd)
-{
-       int i;
-       ByteIO_t io;
-
-       CtlP->AltChanRingIndicator = altChanRingIndicator;
-       CtlP->UPCIRingInd = UPCIRingInd;
-       CtlP->CtlNum = CtlNum;
-       CtlP->CtlID = CTLID_0001;       /* controller release 1 */
-       CtlP->BusType = isPCI;  /* controller release 1 */
-
-       if (ConfigIO) {
-               CtlP->isUPCI = 1;
-               CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
-               CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
-               CtlP->AiopIntrBits = upci_aiop_intr_bits;
-       } else {
-               CtlP->isUPCI = 0;
-               CtlP->PCIIO =
-                   (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
-               CtlP->AiopIntrBits = aiop_intr_bits;
-       }
-
-       sPCIControllerEOI(CtlP);        /* clear EOI if warm init */
-       /* Init AIOPs */
-       CtlP->NumAiop = 0;
-       for (i = 0; i < AiopIOListSize; i++) {
-               io = AiopIOList[i];
-               CtlP->AiopIO[i] = (WordIO_t) io;
-               CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
-
-               CtlP->AiopID[i] = sReadAiopID(io);      /* read AIOP ID */
-               if (CtlP->AiopID[i] == AIOPID_NULL)     /* if AIOP does not exist */
-                       break;  /* done looking for AIOPs */
-
-               CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
-               sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE);    /* clock prescaler */
-               sOutB(io + _INDX_DATA, sClockPrescale);
-               CtlP->NumAiop++;        /* bump count of AIOPs */
-       }
-
-       if (CtlP->NumAiop == 0)
-               return (-1);
-       else
-               return (CtlP->NumAiop);
-}
-
-/*  Resets the speaker controller on RocketModem II and III devices */
-static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
-{
-       ByteIO_t addr;
-
-       /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
-       if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
-               addr = CtlP->AiopIO[0] + 0x4F;
-               sOutB(addr, 0);
-       }
-
-       /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
-       if ((model == MODEL_UPCI_RM3_8PORT)
-           || (model == MODEL_UPCI_RM3_4PORT)) {
-               addr = CtlP->AiopIO[0] + 0x88;
-               sOutB(addr, 0);
-       }
-}
-#endif
-
 /***************************************************************************
 Function: sReadAiopID
 Purpose:  Read the AIOP idenfication number directly from an AIOP.
index 46528d57be72b710513a65ad2494ae839774981a..86c00b1c55836b3c52801c9d79bccef489c728d2 100644 (file)
@@ -2755,7 +2755,7 @@ static void __init serial8250_isa_init_ports(void)
        if (nr_uarts > UART_NR)
                nr_uarts = UART_NR;
 
-       for (i = 0; i < UART_NR; i++) {
+       for (i = 0; i < nr_uarts; i++) {
                struct uart_8250_port *up = &serial8250_ports[i];
                struct uart_port *port = &up->port;
 
@@ -2916,7 +2916,7 @@ static int __init serial8250_console_setup(struct console *co, char *options)
         * if so, search for the first available port that does have
         * console support.
         */
-       if (co->index >= UART_NR)
+       if (co->index >= nr_uarts)
                co->index = 0;
        port = &serial8250_ports[co->index].port;
        if (!port->iobase && !port->membase)
@@ -2957,7 +2957,7 @@ int serial8250_find_port(struct uart_port *p)
        int line;
        struct uart_port *port;
 
-       for (line = 0; line < UART_NR; line++) {
+       for (line = 0; line < nr_uarts; line++) {
                port = &serial8250_ports[line].port;
                if (uart_match_port(p, port))
                        return line;
@@ -3110,7 +3110,7 @@ static int serial8250_remove(struct platform_device *dev)
 {
        int i;
 
-       for (i = 0; i < UART_NR; i++) {
+       for (i = 0; i < nr_uarts; i++) {
                struct uart_8250_port *up = &serial8250_ports[i];
 
                if (up->port.dev == &dev->dev)
@@ -3178,7 +3178,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
        /*
         * First, find a port entry which matches.
         */
-       for (i = 0; i < UART_NR; i++)
+       for (i = 0; i < nr_uarts; i++)
                if (uart_match_port(&serial8250_ports[i].port, port))
                        return &serial8250_ports[i];
 
@@ -3187,7 +3187,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
         * free entry.  We look for one which hasn't been previously
         * used (indicated by zero iobase).
         */
-       for (i = 0; i < UART_NR; i++)
+       for (i = 0; i < nr_uarts; i++)
                if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
                    serial8250_ports[i].port.iobase == 0)
                        return &serial8250_ports[i];
@@ -3196,7 +3196,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
         * That also failed.  Last resort is to find any entry which
         * doesn't have a real port associated with it.
         */
-       for (i = 0; i < UART_NR; i++)
+       for (i = 0; i < nr_uarts; i++)
                if (serial8250_ports[i].port.type == PORT_UNKNOWN)
                        return &serial8250_ports[i];
 
index beaa283f5cc6f75c24484b11beb73704f25a6ccd..d07b6af3a9379db82927677a5915d9bae01d7874 100644 (file)
@@ -338,7 +338,8 @@ static int dw8250_runtime_suspend(struct device *dev)
 {
        struct dw8250_data *data = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(data->clk);
+       if (!IS_ERR(data->clk))
+               clk_disable_unprepare(data->clk);
 
        return 0;
 }
@@ -347,7 +348,8 @@ static int dw8250_runtime_resume(struct device *dev)
 {
        struct dw8250_data *data = dev_get_drvdata(dev);
 
-       clk_prepare_enable(data->clk);
+       if (!IS_ERR(data->clk))
+               clk_prepare_enable(data->clk);
 
        return 0;
 }
@@ -367,6 +369,7 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
 static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT33C4", 0 },
        { "INT33C5", 0 },
+       { "80860F0A", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 8ab70a6209199145f840b9d0cff0b255700f454c..e2774f9ecd59f16915e0647028e643e616b9c4cb 100644 (file)
@@ -332,7 +332,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
                dmaengine_slave_config(chan, &rx_conf);
                uap->dmarx.chan = chan;
 
-               if (plat->dma_rx_poll_enable) {
+               if (plat && plat->dma_rx_poll_enable) {
                        /* Set poll rate if specified. */
                        if (plat->dma_rx_poll_rate) {
                                uap->dmarx.auto_poll_rate = false;
index 147c9e1935951dcca13a7df60b2d02e999663a30..8cdfbd365892146611b772bbfb217a8f48938c3a 100644 (file)
@@ -761,6 +761,8 @@ static int imx_startup(struct uart_port *port)
 
        temp = readl(sport->port.membase + UCR2);
        temp |= (UCR2_RXEN | UCR2_TXEN);
+       if (!sport->have_rtscts)
+               temp |= UCR2_IRTS;
        writel(temp, sport->port.membase + UCR2);
 
        if (USE_IRDA(sport)) {
index e956377a38fe53e299766724c278a05a35d802c4..65be0c00c4bff3f802549a23dfe34515a5a03b54 100644 (file)
@@ -707,8 +707,10 @@ static int __init mcf_init(void)
        if (rc)
                return rc;
        rc = platform_driver_register(&mcf_platform_driver);
-       if (rc)
+       if (rc) {
+               uart_unregister_driver(&mcf_driver);
                return rc;
+       }
        return 0;
 }
 
index 018bad922554fc45b9ad711bcab2a106fc683b6e..f51b280f3bf280b882130ec92b499be65937eed2 100644 (file)
@@ -1497,18 +1497,23 @@ mpc52xx_uart_init(void)
        if (psc_ops && psc_ops->fifoc_init) {
                ret = psc_ops->fifoc_init();
                if (ret)
-                       return ret;
+                       goto err_init;
        }
 
        ret = platform_driver_register(&mpc52xx_uart_of_driver);
        if (ret) {
                printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
                       __FILE__, ret);
-               uart_unregister_driver(&mpc52xx_uart_driver);
-               return ret;
+               goto err_reg;
        }
 
        return 0;
+err_reg:
+       if (psc_ops && psc_ops->fifoc_uninit)
+               psc_ops->fifoc_uninit();
+err_init:
+       uart_unregister_driver(&mpc52xx_uart_driver);
+       return ret;
 }
 
 static void __exit
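
The reworked init path above unwinds in reverse order on failure: if platform_driver_register() fails, the FIFO controller is torn down again (when it was set up) and the uart driver is unregistered, rather than leaving earlier steps half-initialized. A generic sketch of that goto-unwind style (the step names are hypothetical):

#include <stdio.h>

static int step_a(void)  { return 0; }
static void undo_a(void) { printf("undo a\n"); }
static int step_b(void)  { return -1; }  /* pretend registration fails */

static int init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_a;

	ret = step_b();
	if (ret)
		goto err_b;

	return 0;

err_b:
	undo_a();   /* undo only what succeeded, newest first */
err_a:
	return ret;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}
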
index 77287c54f331682425403e045b0548c7f460590c..549c70a2a63e8397521b63fd6120e7dad9cfb079 100644 (file)
@@ -199,7 +199,7 @@ static void nwpserial_shutdown(struct uart_port *port)
        dcr_write(up->dcr_host, UART_IER, up->ier);
 
        /* free irq */
-       free_irq(up->port.irq, port);
+       free_irq(up->port.irq, up);
 }
 
 static int nwpserial_verify_port(struct uart_port *port,
index 30d4f7a783cd1f0bbfdf6ebaf491f2400153eaf0..f0b9f6b52b32cd887ad6b86586f97aa9eec9648c 100644 (file)
@@ -202,26 +202,6 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
        return pdata->get_context_loss_count(up->dev);
 }
 
-static void serial_omap_set_forceidle(struct uart_omap_port *up)
-{
-       struct omap_uart_port_info *pdata = up->dev->platform_data;
-
-       if (!pdata || !pdata->set_forceidle)
-               return;
-
-       pdata->set_forceidle(up->dev);
-}
-
-static void serial_omap_set_noidle(struct uart_omap_port *up)
-{
-       struct omap_uart_port_info *pdata = up->dev->platform_data;
-
-       if (!pdata || !pdata->set_noidle)
-               return;
-
-       pdata->set_noidle(up->dev);
-}
-
 static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
 {
        struct omap_uart_port_info *pdata = up->dev->platform_data;
@@ -298,8 +278,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
                serial_out(up, UART_IER, up->ier);
        }
 
-       serial_omap_set_forceidle(up);
-
        pm_runtime_mark_last_busy(up->dev);
        pm_runtime_put_autosuspend(up->dev);
 }
@@ -364,7 +342,6 @@ static void serial_omap_start_tx(struct uart_port *port)
 
        pm_runtime_get_sync(up->dev);
        serial_omap_enable_ier_thri(up);
-       serial_omap_set_noidle(up);
        pm_runtime_mark_last_busy(up->dev);
        pm_runtime_put_autosuspend(up->dev);
 }
index 074b9194144fdefa1e09536a96626129624a64e0..0c8a9fa2be6cee12182bb8e32e0ed37be1c8fdab 100644 (file)
@@ -1166,6 +1166,18 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
                ourport->tx_irq = ret;
 
        ourport->clk    = clk_get(&platdev->dev, "uart");
+       if (IS_ERR(ourport->clk)) {
+               pr_err("%s: Controller clock not found\n",
+                               dev_name(&platdev->dev));
+               return PTR_ERR(ourport->clk);
+       }
+
+       ret = clk_prepare_enable(ourport->clk);
+       if (ret) {
+               pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+               clk_put(ourport->clk);
+               return ret;
+       }
 
        /* Keep all interrupts masked and cleared */
        if (s3c24xx_serial_has_interrupt_mask(port)) {
@@ -1180,6 +1192,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 
        /* reset the fifos (and setup the uart) */
        s3c24xx_serial_resetport(port, cfg);
+       clk_disable_unprepare(ourport->clk);
        return 0;
 }
 
@@ -1803,6 +1816,7 @@ static int __init s3c24xx_serial_modinit(void)
 
 static void __exit s3c24xx_serial_modexit(void)
 {
+       platform_driver_unregister(&samsung_serial_driver);
        uart_unregister_driver(&s3c24xx_uart_drv);
 }
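Editor's note (not part of the commit): the samsung serial hunks above fetch the controller clock with error checking, enable it around the port reset, and make module exit unregister the platform driver before the uart driver. A minimal, self-contained sketch of the clock handling, with hypothetical demo_* names (the real driver keeps the clk reference in its per-port state rather than releasing it):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Hypothetical helper showing clk_get + clk_prepare_enable error handling. */
    static int demo_touch_hw_with_clock(struct device *dev)
    {
            struct clk *clk;
            int ret;

            clk = clk_get(dev, "uart");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);            /* e.g. -ENOENT */

            ret = clk_prepare_enable(clk);
            if (ret) {
                    clk_put(clk);                   /* drop the reference on failure */
                    return ret;
            }

            /* ... program registers while the clock is running ... */

            clk_disable_unprepare(clk);             /* gate the clock again */
            clk_put(clk);
            return 0;
    }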
 
index fbd447b390f775fa8182a290266d3d4f41f46e2c..740202d8a5c4b732ab9c98b188c995d3a0f78587 100644 (file)
@@ -779,7 +779,6 @@ int vc_allocate(unsigned int currcons)      /* return 0 on success */
                con_set_default_unimap(vc);
            vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
            if (!vc->vc_screenbuf) {
-               tty_port_destroy(&vc->port);
                kfree(vc);
                vc_cons[currcons].d = NULL;
                return -ENOMEM;
@@ -986,26 +985,25 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
        return ret;
 }
 
-void vc_deallocate(unsigned int currcons)
+struct vc_data *vc_deallocate(unsigned int currcons)
 {
+       struct vc_data *vc = NULL;
+
        WARN_CONSOLE_UNLOCKED();
 
        if (vc_cons_allocated(currcons)) {
-               struct vc_data *vc = vc_cons[currcons].d;
-               struct vt_notifier_param param = { .vc = vc };
+               struct vt_notifier_param param;
 
+               param.vc = vc = vc_cons[currcons].d;
                atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
                vcs_remove_sysfs(currcons);
                vc->vc_sw->con_deinit(vc);
                put_pid(vc->vt_pid);
                module_put(vc->vc_sw->owner);
                kfree(vc->vc_screenbuf);
-               if (currcons >= MIN_NR_CONSOLES) {
-                       tty_port_destroy(&vc->port);
-                       kfree(vc);
-               }
                vc_cons[currcons].d = NULL;
        }
+       return vc;
 }
 
 /*
index 98ff1735eafc0841428a2e2fa2257909073c775e..fc2c06c66e89d5e7536c5bb5387bc0fd9a837086 100644 (file)
@@ -283,6 +283,51 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_
        return 0;
 }
 
+/* deallocate a single console, if possible (leave 0) */
+static int vt_disallocate(unsigned int vc_num)
+{
+       struct vc_data *vc = NULL;
+       int ret = 0;
+
+       if (!vc_num)
+               return 0;
+
+       console_lock();
+       if (VT_BUSY(vc_num))
+               ret = -EBUSY;
+       else
+               vc = vc_deallocate(vc_num);
+       console_unlock();
+
+       if (vc && vc_num >= MIN_NR_CONSOLES) {
+               tty_port_destroy(&vc->port);
+               kfree(vc);
+       }
+
+       return ret;
+}
+
+/* deallocate all unused consoles, but leave 0 */
+static void vt_disallocate_all(void)
+{
+       struct vc_data *vc[MAX_NR_CONSOLES];
+       int i;
+
+       console_lock();
+       for (i = 1; i < MAX_NR_CONSOLES; i++)
+               if (!VT_BUSY(i))
+                       vc[i] = vc_deallocate(i);
+               else
+                       vc[i] = NULL;
+       console_unlock();
+
+       for (i = 1; i < MAX_NR_CONSOLES; i++) {
+               if (vc[i] && i >= MIN_NR_CONSOLES) {
+                       tty_port_destroy(&vc[i]->port);
+                       kfree(vc[i]);
+               }
+       }
+}
 
 
 /*
@@ -769,24 +814,10 @@ int vt_ioctl(struct tty_struct *tty,
                        ret = -ENXIO;
                        break;
                }
-               if (arg == 0) {
-                   /* deallocate all unused consoles, but leave 0 */
-                       console_lock();
-                       for (i=1; i<MAX_NR_CONSOLES; i++)
-                               if (! VT_BUSY(i))
-                                       vc_deallocate(i);
-                       console_unlock();
-               } else {
-                       /* deallocate a single console, if possible */
-                       arg--;
-                       if (VT_BUSY(arg))
-                               ret = -EBUSY;
-                       else if (arg) {                       /* leave 0 */
-                               console_lock();
-                               vc_deallocate(arg);
-                               console_unlock();
-                       }
-               }
+               if (arg == 0)
+                       vt_disallocate_all();
+               else
+                       ret = vt_disallocate(--arg);
                break;
 
        case VT_RESIZE:
index e92eeaf251fe33c8c12f693b54db8b75258643a2..5295be0342c1acc88f5698bbdc29a16268ad551a 100644 (file)
@@ -45,6 +45,7 @@ config UIO_PDRV_GENIRQ
 
 config UIO_DMEM_GENIRQ
        tristate "Userspace platform driver with generic irq and dynamic memory"
+       depends on HAS_DMA
        help
          Platform driver for Userspace I/O devices, including generic
          interrupt handling code. Shared interrupts are not supported.
index b7eb86ad6bf2c2538e1a5d08d358c9b29f432e76..8a7eb77233b4a7b275e042dfeb965be2f45393f8 100644 (file)
@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
 {
        int ret, len;
        __le32 *buf;
-       int offb, offd;
+       int offb;
+       unsigned int offd;
        const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
        int buflen =  ((size - 1) / stride + 1 + size * 2) * 4;
 
index 608a2aeb400c929726c4a9581dfad1e09eeed179..b2df442eb3e5b9537772569cfd321e32e5ef2ce5 100644 (file)
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC
 config USB_CHIPIDEA_HOST
        bool "ChipIdea host controller"
        depends on USB=y || USB=USB_CHIPIDEA
-       depends on USB_EHCI_HCD
+       depends on USB_EHCI_HCD=y
        select USB_EHCI_ROOT_HUB_TT
        help
          Say Y here to enable host controller functionality of the
index 8faec9dbbb84326d436ffc568451b322eb95a1a3..73f9d5f15adb6706b30a48b4c34963027d2565a0 100644 (file)
@@ -173,17 +173,10 @@ static int ci13xxx_imx_probe(struct platform_device *pdev)
 
        ci13xxx_imx_platdata.phy = data->phy;
 
-       if (!pdev->dev.dma_mask) {
-               pdev->dev.dma_mask = devm_kzalloc(&pdev->dev,
-                                     sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
-               if (!pdev->dev.dma_mask) {
-                       ret = -ENOMEM;
-                       dev_err(&pdev->dev, "Failed to alloc dma_mask!\n");
-                       goto err;
-               }
-               *pdev->dev.dma_mask = DMA_BIT_MASK(32);
-               dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask);
-       }
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        if (usbmisc_ops && usbmisc_ops->init) {
                ret = usbmisc_ops->init(&pdev->dev);
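Editor's note (not part of the commit): the dma_mask fallback introduced here reappears in many probe() hunks later in this diff (ehci-atmel, ehci-omap, ehci-orion, ehci-spear, ehci-tegra, ohci-at91, ohci-omap3, ohci-pxa, ohci-spear, uhci-platform, dwc3-exynos and others). Instead of a per-driver static u64 mask, the device's dma_mask pointer is aimed at its own coherent_dma_mask and both default to 32 bits. A self-contained sketch of the pattern, with a hypothetical demo_probe():

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            /*
             * DT-instantiated platform devices may arrive without a dma_mask;
             * reuse the embedded coherent_dma_mask as its storage and default
             * both to 32 bits so the shared USB core can set up DMA mappings.
             */
            if (!pdev->dev.dma_mask)
                    pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
            if (!pdev->dev.coherent_dma_mask)
                    pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

            /* ... the rest of the probe path ... */
            return 0;
    }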
index 450107e5f657a32a3b7d5b0fd1e310a99e77ec66..475c9c1146896dbefaf5ae71adbfcf86bcb5aa55 100644 (file)
@@ -276,8 +276,9 @@ static void ci_role_work(struct work_struct *work)
 
                ci_role_stop(ci);
                ci_role_start(ci, role);
-               enable_irq(ci->irq);
        }
+
+       enable_irq(ci->irq);
 }
 
 static irqreturn_t ci_irq(int irq, void *data)
@@ -370,11 +371,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "missing resource\n");
-               return -ENODEV;
-       }
-
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
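Editor's note (not part of the commit): several hunks in this diff (ci_hdrc_probe above, bcm63xx_udc and ohci-nxp below) drop the explicit NULL check after platform_get_resource(). That is safe because devm_ioremap_resource() itself rejects a NULL resource, logs an error, and returns an ERR_PTR, so a single IS_ERR() test covers both failure modes. A minimal sketch with hypothetical names:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static void __iomem *demo_map_regs(struct platform_device *pdev)
    {
            struct resource *res;

            /* May be NULL; devm_ioremap_resource() copes with that. */
            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

            /* Returns an ERR_PTR (and prints its own message) on failure. */
            return devm_ioremap_resource(&pdev->dev, res);
    }

A caller then needs only the IS_ERR()/PTR_ERR() test, as in the hunk above.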
index 519ead2443c5630baaacaa4e1023e8ec766a2e39..b501346484aeb16ab5e92e91e9aaf2458bf04784 100644 (file)
@@ -1678,8 +1678,11 @@ static int udc_start(struct ci13xxx *ci)
 
        ci->gadget.ep0 = &ci->ep0in->ep;
 
-       if (ci->global_phy)
+       if (ci->global_phy) {
                ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+               if (IS_ERR(ci->transceiver))
+                       ci->transceiver = NULL;
+       }
 
        if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
                if (ci->transceiver == NULL) {
@@ -1694,7 +1697,7 @@ static int udc_start(struct ci13xxx *ci)
                        goto put_transceiver;
        }
 
-       if (!IS_ERR_OR_NULL(ci->transceiver)) {
+       if (ci->transceiver) {
                retval = otg_set_peripheral(ci->transceiver->otg,
                                                &ci->gadget);
                if (retval)
@@ -1711,7 +1714,7 @@ static int udc_start(struct ci13xxx *ci)
        return retval;
 
 remove_trans:
-       if (!IS_ERR_OR_NULL(ci->transceiver)) {
+       if (ci->transceiver) {
                otg_set_peripheral(ci->transceiver->otg, NULL);
                if (ci->global_phy)
                        usb_put_phy(ci->transceiver);
@@ -1719,7 +1722,7 @@ remove_trans:
 
        dev_err(dev, "error = %i\n", retval);
 put_transceiver:
-       if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
+       if (ci->transceiver && ci->global_phy)
                usb_put_phy(ci->transceiver);
 destroy_eps:
        destroy_eps(ci);
@@ -1747,7 +1750,7 @@ static void udc_stop(struct ci13xxx *ci)
        dma_pool_destroy(ci->td_pool);
        dma_pool_destroy(ci->qh_pool);
 
-       if (!IS_ERR_OR_NULL(ci->transceiver)) {
+       if (ci->transceiver) {
                otg_set_peripheral(ci->transceiver->otg, NULL);
                if (ci->global_phy)
                        usb_put_phy(ci->transceiver);
index 8772b3659296b90b2c009774fd3bba8c0498236e..db535b0aa172c85b3787a476b8fe3adeefbe3e6f 100644 (file)
@@ -51,7 +51,7 @@ config USB_DYNAMIC_MINORS
 
 config USB_OTG
        bool "OTG support"
-       depends on USB_SUSPEND
+       depends on PM_RUNTIME
        default n
        help
          The most notable feature of USB OTG is support for a
index caefc800f2986defa9f66973d071fc7140d632d4..c88c4fb9459dfbc94119e8a0590a924fa5868f0c 100644 (file)
@@ -1287,9 +1287,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
                        goto error;
                }
                for (totlen = u = 0; u < uurb->number_of_packets; u++) {
-                       /* arbitrary limit,
-                        * sufficient for USB 2.0 high-bandwidth iso */
-                       if (isopkt[u].length > 8192) {
+                       /*
+                        * arbitrary limit needed for USB 3.0
+                        * bMaxBurst (0~15 allowed, 1~16 packets)
+                        * bmAttributes (bit 1:0, mult 0~2, 1~3 packets)
+                        * sizemax: 1024 * 16 * 3 = 49152
+                        */
+                       if (isopkt[u].length > 49152) {
                                ret = -EINVAL;
                                goto error;
                        }
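Editor's note (not part of the commit): the new 49152-byte cap above is the largest isochronous service-interval payload USB 3.0 allows: 1024 bytes per packet, up to 16 packets per burst (bMaxBurst 0 to 15), and up to 3 bursts per interval (mult 0 to 2). A trivial self-contained check of that arithmetic; the macro names are illustrative only:

    #include <assert.h>

    #define DEMO_USB3_ISO_MAX_PACKET 1024   /* bytes per packet */
    #define DEMO_USB3_ISO_MAX_BURST  16     /* bMaxBurst 0..15 -> 1..16 packets */
    #define DEMO_USB3_ISO_MAX_MULT   3      /* mult 0..2 -> 1..3 bursts */

    int main(void)
    {
            assert(DEMO_USB3_ISO_MAX_PACKET * DEMO_USB3_ISO_MAX_BURST *
                   DEMO_USB3_ISO_MAX_MULT == 49152);
            return 0;
    }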
index ab5638d9c707c0524eb37a58fc1ce3e95d1a7ae9..a63598895077ff8c984c0ed473d5ee90afa52086 100644 (file)
@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Edirol SD-20 */
        { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Alcor Micro Corp. Hub */
+       { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
index ea5ee9c21c3514cc7a0fe87ff2f6d15bcc206eb2..757aa18027d05ca0eb198d97d92f1710e8e58def 100644 (file)
@@ -19,21 +19,21 @@ choice
 
 config USB_DWC3_HOST
        bool "Host only mode"
-       depends on USB
+       depends on USB=y || USB=USB_DWC3
        help
          Select this when you want to use DWC3 in host mode only,
          thereby the gadget feature will be regressed.
 
 config USB_DWC3_GADGET
        bool "Gadget only mode"
-       depends on USB_GADGET
+       depends on USB_GADGET=y || USB_GADGET=USB_DWC3
        help
          Select this when you want to use DWC3 in gadget mode only,
          thereby the host feature will be regressed.
 
 config USB_DWC3_DUAL_ROLE
        bool "Dual Role mode"
-       depends on (USB && USB_GADGET)
+       depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
        help
          This is the default mode of working of DWC3 controller where
          both host and gadget features are enabled.
index a8afe6e2662186e50e45cf578d967c901c9a7456..8ce9d7fd6cfc30051462373d3c8d1f2b681dfcb3 100644 (file)
@@ -95,8 +95,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused)
        return 0;
 }
 
-static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32);
-
 static int dwc3_exynos_probe(struct platform_device *pdev)
 {
        struct dwc3_exynos      *exynos;
@@ -118,7 +116,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
         * Once we move to full device tree support this will vanish off.
         */
        if (!dev->dma_mask)
-               dev->dma_mask = &dwc3_exynos_dma_mask;
+               dev->dma_mask = &dev->coherent_dma_mask;
+       if (!dev->coherent_dma_mask)
+               dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
        platform_set_drvdata(pdev, exynos);
 
@@ -164,9 +164,9 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
 {
        struct dwc3_exynos      *exynos = platform_get_drvdata(pdev);
 
+       device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
        platform_device_unregister(exynos->usb2_phy);
        platform_device_unregister(exynos->usb3_phy);
-       device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
 
        clk_disable_unprepare(exynos->clk);
 
index 227d4a7acad7595d4774e9fc335a790c742087be..eba9e2baf32b3d23a78977342286a2a8be5506aa 100644 (file)
@@ -196,9 +196,9 @@ static void dwc3_pci_remove(struct pci_dev *pci)
 {
        struct dwc3_pci *glue = pci_get_drvdata(pci);
 
+       platform_device_unregister(glue->dwc3);
        platform_device_unregister(glue->usb2_phy);
        platform_device_unregister(glue->usb3_phy);
-       platform_device_unregister(glue->dwc3);
        pci_set_drvdata(pci, NULL);
        pci_disable_device(pci);
 }
index 2b6e7e0012071259464c1db3acf6c54b55e5971e..b5e5b35df49c8ff49ca5817ced0838e79dad9ccf 100644 (file)
@@ -1706,11 +1706,19 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
                dep = dwc->eps[epnum];
                if (!dep)
                        continue;
-
-               dwc3_free_trb_pool(dep);
-
-               if (epnum != 0 && epnum != 1)
+               /*
+                * Physical endpoints 0 and 1 are special; they form the
+                * bi-directional USB endpoint 0.
+                *
+                * For those two physical endpoints, we don't allocate a TRB
+                * pool nor do we add them to the endpoints list. Due to that, we
+                * shouldn't do these two operations; otherwise we would end up
+                * with all sorts of bugs when removing dwc3.ko.
+                */
+               if (epnum != 0 && epnum != 1) {
+                       dwc3_free_trb_pool(dep);
                        list_del(&dep->endpoint.ep_list);
+               }
 
                kfree(dep);
        }
index 83300d94a8933ff97a52992466799874cdeacb20..f41aa0d0c414312163bf2076935fb96adccd7036 100644 (file)
@@ -146,7 +146,6 @@ config USB_LPC32XX
        depends on ARCH_LPC32XX
        depends on USB_PHY
        select USB_ISP1301
-       select USB_OTG_UTILS
        help
           This option selects the USB device controller in the LPC32xx SoC.
 
index f2a970f75bfa949b2c28db72c183e00790fea280..5a5128a226f737d2ec74915cde9f23d5d74e0470 100644 (file)
@@ -1992,8 +1992,6 @@ err_map_regs:
 err_get_hclk:
        clk_put(pclk);
 
-       platform_set_drvdata(pdev, NULL);
-
        return ret;
 }
 
index 6e6518264c42253b810bb41d93fbcd46da7a4906..fd24cb4540a49d6b0382b932ff99eae2dc9c309f 100644 (file)
@@ -2334,21 +2334,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "error finding USBD resource\n");
-               return -ENXIO;
-       }
-
        udc->usbd_regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(udc->usbd_regs))
                return PTR_ERR(udc->usbd_regs);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(dev, "error finding IUDMA resource\n");
-               return -ENXIO;
-       }
-
        udc->iudma_regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(udc->iudma_regs))
                return PTR_ERR(udc->iudma_regs);
@@ -2420,7 +2410,6 @@ static int bcm63xx_udc_remove(struct platform_device *pdev)
        usb_del_gadget_udc(&udc->gadget);
        BUG_ON(udc->driver);
 
-       platform_set_drvdata(pdev, NULL);
        bcm63xx_uninit_udc_hw(udc);
 
        return 0;
index 3d5cfc9c2c78a51950701b68bb94bfbb25f2f25c..80e7f75a56c7efa43b33a752671eb8c263bb1bf9 100644 (file)
@@ -821,8 +821,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
                gi->gstrings[i] = NULL;
                s = usb_gstrings_attach(&gi->cdev, gi->gstrings,
                                USB_GADGET_FIRST_AVAIL_IDX);
-               if (IS_ERR(s))
+               if (IS_ERR(s)) {
+                       ret = PTR_ERR(s);
                        goto err_comp_cleanup;
+               }
 
                gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
                gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
@@ -847,8 +849,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
                        }
                        cfg->gstrings[i] = NULL;
                        s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1);
-                       if (IS_ERR(s))
+                       if (IS_ERR(s)) {
+                               ret = PTR_ERR(s);
                                goto err_comp_cleanup;
+                       }
                        c->iConfiguration = s[0].id;
                }
 
index a792e322f4f1b84c58e20538b48d1e7beea4082f..c588e8e486e590843ed2714403f282a7b3748a68 100644 (file)
@@ -1001,7 +1001,6 @@ static int dummy_udc_remove(struct platform_device *pdev)
        struct dummy    *dum = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&dum->gadget);
-       platform_set_drvdata(pdev, NULL);
        device_remove_file(&dum->gadget.dev, &dev_attr_function);
        return 0;
 }
@@ -2661,8 +2660,10 @@ static int __init init(void)
        }
        for (i = 0; i < mod_data.num; i++) {
                dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL);
-               if (!dum[i])
+               if (!dum[i]) {
+                       retval = -ENOMEM;
                        goto err_add_pdata;
+               }
                retval = platform_device_add_data(the_hcd_pdev[i], &dum[i],
                                sizeof(void *));
                if (retval)
index d893d69290794efa9c8d1e064fe1abd62077ff6a..abf8a31ae146028a89130835d600b4a7dfa00486 100644 (file)
@@ -816,6 +816,7 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
  * @c: the configuration to support the network link
  * @ethaddr: a buffer in which the ethernet address of the host side
  *     side of the link was recorded
+ * @dev: eth_dev structure
  * Context: single threaded during gadget setup
  *
  * Returns zero on success, else negative errno.
index 185d6f5e4e4d15a933aeaa5b5e8ab0edc2c642b5..7be04b3424941f73c96d7a6de51d53101efe0f92 100644 (file)
@@ -373,6 +373,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f)
  * @c: the configuration to support the network link
  * @ethaddr: a buffer in which the ethernet address of the host side
  *     side of the link was recorded
+ * @dev: eth_dev structure
  * Context: single threaded during gadget setup
  *
  * Returns zero on success, else negative errno.
index c7468b6c07b0057041f63328da8cf075f6ada80b..03c1fb686644e02d34b5b0f4a9c8e847d6a6de27 100644 (file)
@@ -456,8 +456,6 @@ static int snd_uac2_remove(struct platform_device *pdev)
 {
        struct snd_card *card = platform_get_drvdata(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (card)
                return snd_card_free(card);
 
index cec8871b77f9d9acd91b2c845175b6f9150e8571..b8632d40f8bffcd3f653a99c12f6d6b02a8cd11d 100644 (file)
@@ -1461,8 +1461,10 @@ static int __init fusb300_probe(struct platform_device *pdev)
 
        fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
                                GFP_KERNEL);
-       if (fusb300->ep0_req == NULL)
+       if (fusb300->ep0_req == NULL) {
+               ret = -ENOMEM;
                goto clean_up3;
+       }
 
        init_controller(fusb300);
        ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
index b5cebd6b0d7af170128ede0f90754052b694aeb2..9b2d24e4c95f0c27f3e98c0d2dbf3aa6bfad4b83 100644 (file)
@@ -1511,8 +1511,6 @@ static int __exit imx_udc_remove(struct platform_device *pdev)
        if (pdata->exit)
                pdata->exit(&pdev->dev);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 866ef09992478e18c114592305297efe640d225f..51cfe72da5bb88d6fb0a21a1d1d2753fa2e6c115 100644 (file)
@@ -1660,8 +1660,10 @@ static int __init m66592_probe(struct platform_device *pdev)
        m66592->epaddr2ep[0] = &m66592->ep[0];
 
        m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
-       if (m66592->ep0_req == NULL)
+       if (m66592->ep0_req == NULL) {
+               ret = -ENOMEM;
                goto clean_up3;
+       }
        m66592->ep0_req->complete = nop_completion;
 
        init_controller(m66592);
index ef47495dec8f5e638e97ee4171b0a54b22dc2ba4..95c531d5aa4fa2dd9fcf4d038c5426254cb16629 100644 (file)
@@ -2236,7 +2236,6 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
                dev->transceiver = NULL;
        }
 
-       platform_set_drvdata(pdev, NULL);
        the_controller = NULL;
        return 0;
 }
index 0b742d171843fc8579691930791d0926a7f3d958..7ff7d9cf2061d28ebcd2b4a2935678b0f1460144 100644 (file)
@@ -1977,8 +1977,10 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 
        r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
                                                        GFP_KERNEL);
-       if (r8a66597->ep0_req == NULL)
+       if (r8a66597->ep0_req == NULL) {
+               ret = -ENOMEM;
                goto clean_up3;
+       }
        r8a66597->ep0_req->complete = nop_completion;
 
        ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
index a3cdc32115d52c646f2ae2b89ded1fcf378a9d70..af22f24046b239dcc9dda8553c17377326c62ded 100644 (file)
@@ -437,7 +437,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
        if (hs_req->req.length == 0)
                return;
 
-       usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in);
+       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
 }
 
 /**
index d0e75e1b3ccb9323de0a29622586894710fd2679..09c4f70c93c4cb13639cf89ed7d423c2231df621 100644 (file)
@@ -1851,6 +1851,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
                irq = gpio_to_irq(udc_info->vbus_pin);
                if (irq < 0) {
                        dev_err(dev, "no irq for gpio vbus pin\n");
+                       retval = irq;
                        goto err_gpio_claim;
                }
 
@@ -1948,8 +1949,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
        iounmap(base_addr);
        release_mem_region(rsrc_start, rsrc_len);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (!IS_ERR(udc_clock) && udc_clock != NULL) {
                clk_disable(udc_clock);
                clk_put(udc_clock);
index 2cd6262e8b7149a23d8458e76e5ef6ef3b919f1c..0deb9d6cde26245fd1c52c0b541eecd100aedce6 100644 (file)
@@ -284,12 +284,16 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
        ss_opts->bulk_buflen = gzero_options.bulk_buflen;
 
        func_ss = usb_get_function(func_inst_ss);
-       if (IS_ERR(func_ss))
+       if (IS_ERR(func_ss)) {
+               status = PTR_ERR(func_ss);
                goto err_put_func_inst_ss;
+       }
 
        func_inst_lb = usb_get_function_instance("Loopback");
-       if (IS_ERR(func_inst_lb))
+       if (IS_ERR(func_inst_lb)) {
+               status = PTR_ERR(func_inst_lb);
                goto err_put_func_ss;
+       }
 
        lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst);
        lb_opts->bulk_buflen = gzero_options.bulk_buflen;
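Editor's note (not part of the commit): the zero.c change above, like the configfs.c, fusb300, m66592, r8a66597, dummy_hcd, s3c2410_udc and musb dsps hunks elsewhere in this diff, fixes the same bug class: an error path that jumped to cleanup while the status variable still held 0. The error code is now captured with PTR_ERR() (or set explicitly, e.g. to -ENOMEM) before the goto. A self-contained sketch, with demo_get_function() standing in for calls such as usb_get_function():

    #include <linux/err.h>
    #include <linux/errno.h>

    /* Hypothetical stand-in that fails the way usb_get_function() can. */
    static void *demo_get_function(void)
    {
            return ERR_PTR(-ENODEV);
    }

    static int demo_bind(void)
    {
            void *f;
            int status;

            f = demo_get_function();
            if (IS_ERR(f)) {
                    status = PTR_ERR(f);    /* capture before jumping to cleanup */
                    goto err_cleanup;
            }

            /* ... continue binding ... */
            return 0;

    err_cleanup:
            /* ... undo whatever was set up earlier ... */
            return status;
    }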
index de94f2699063b12dd3c0cd1613a9cd2e37cd00c6..344d5e2f87d73a7234452a2b698f63b4959494e5 100644 (file)
@@ -507,7 +507,7 @@ endif # USB_OHCI_HCD
 
 config USB_UHCI_HCD
        tristate "UHCI HCD (most Intel and VIA) support"
-       depends on PCI || SPARC_LEON || ARCH_VT8500
+       depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
        ---help---
          The Universal Host Controller Interface is a standard by Intel for
          accessing the USB hardware in the PC (which is also called the USB
@@ -524,26 +524,19 @@ config USB_UHCI_HCD
 
 config USB_UHCI_SUPPORT_NON_PCI_HC
        bool
-       depends on USB_UHCI_HCD
-       default y if (SPARC_LEON || ARCH_VT8500)
+       default y if (SPARC_LEON || USB_UHCI_PLATFORM)
 
 config USB_UHCI_PLATFORM
-       bool "Generic UHCI Platform Driver support"
-       depends on USB_UHCI_SUPPORT_NON_PCI_HC
+       bool
        default y if ARCH_VT8500
-       ---help---
-         Enable support for generic UHCI platform devices that require no
-         additional configuration.
 
 config USB_UHCI_BIG_ENDIAN_MMIO
        bool
-       depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON
-       default y
+       default y if SPARC_LEON
 
 config USB_UHCI_BIG_ENDIAN_DESC
        bool
-       depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON
-       default y
+       default y if SPARC_LEON
 
 config USB_FHCI_HCD
        tristate "Freescale QE USB Host Controller support"
index 66420097c24234eb3db9d09a656345cd80edcc9f..02f4611faa62c571a3a59ff4b33a35ead1966caf 100644 (file)
@@ -63,8 +63,6 @@ static void atmel_stop_ehci(struct platform_device *pdev)
 
 /*-------------------------------------------------------------------------*/
 
-static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32);
-
 static int ehci_atmel_drv_probe(struct platform_device *pdev)
 {
        struct usb_hcd *hcd;
@@ -93,7 +91,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &at91_ehci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
        if (!hcd) {
index 312fc10da3c7aade0225cd126f28e372f1906058..246e124e6ac55c3dc66ba822a9b3f8ba5bebdd6a 100644 (file)
@@ -1286,23 +1286,6 @@ MODULE_LICENSE ("GPL");
 #define        PLATFORM_DRIVER         ehci_hcd_sead3_driver
 #endif
 
-#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
-       !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \
-       !defined(PLATFORM_DRIVER) && \
-       !defined(PS3_SYSTEM_BUS_DRIVER) && \
-       !defined(OF_PLATFORM_DRIVER) && \
-       !defined(XILINX_OF_PLATFORM_DRIVER)
-#error "missing bus glue for ehci-hcd"
-#endif
-
 static int __init ehci_hcd_init(void)
 {
        int retval = 0;
index 3d1491b5f3605e68b0a72e1d5786233f5ad94d7b..16d7150e855722be6bc143c504d31120adf53f83 100644 (file)
@@ -90,8 +90,6 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
        .extra_priv_size = sizeof(struct omap_hcd),
 };
 
-static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32);
-
 /**
  * ehci_hcd_omap_probe - initialize TI-based HCDs
  *
@@ -146,8 +144,10 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &omap_ehci_dma_mask;
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       if (!dev->coherent_dma_mask)
+               dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
                        dev_name(dev));
index 54c57948515051867df3dc8d218699ba6806e5ff..efbc588b48c50acbc2225c6be3f63ce1b1f35ffd 100644 (file)
@@ -137,8 +137,6 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
        }
 }
 
-static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32);
-
 static int ehci_orion_drv_probe(struct platform_device *pdev)
 {
        struct orion_ehci_data *pd = pdev->dev.platform_data;
@@ -183,7 +181,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
         * now. Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &ehci_orion_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        if (!request_mem_region(res->start, resource_size(res),
                                ehci_orion_hc_driver.description)) {
index 635775278c7fec525a1a3566fce05bb77f3ad77e..379037f51a2fc41b7e7aaaf112a759ce70615ac5 100644 (file)
@@ -71,8 +71,6 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev)
                dev_err(dev, "can't request ehci vbus gpio %d", gpio);
 }
 
-static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32);
-
 static int s5p_ehci_probe(struct platform_device *pdev)
 {
        struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
@@ -90,7 +88,7 @@ static int s5p_ehci_probe(struct platform_device *pdev)
         * Once we move to full device tree support this will vanish off.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &ehci_s5p_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
@@ -107,6 +105,7 @@ static int s5p_ehci_probe(struct platform_device *pdev)
        if (IS_ERR(phy)) {
                /* Fallback to pdata */
                if (!pdata) {
+                       usb_put_hcd(hcd);
                        dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
                        return -EPROBE_DEFER;
                } else {
index acff5b8f6e89dd89edf37d30377c2b1b86307c68..f80d0330d548d36564e9332d60392c9671360263 100644 (file)
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 }
 
 static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
 
 /* carryover low/fullspeed bandwidth that crosses uframe boundries */
 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -646,6 +646,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
        /* reschedule QH iff another request is queued */
        if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
                rc = qh_schedule(ehci, qh);
+               if (rc == 0) {
+                       qh_refresh(ehci, qh);
+                       qh_link_periodic(ehci, qh);
+               }
 
                /* An error here likely indicates handshake failure
                 * or no space left in the schedule.  Neither fault
@@ -653,9 +657,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
                 *
                 * FIXME kill the now-dysfunctional queued urbs
                 */
-               if (rc != 0)
+               else {
                        ehci_err(ehci, "can't reschedule qh %p, err %d\n",
                                        qh, rc);
+               }
        }
 
        /* maybe turn off periodic schedule */
index 61ecfb3d52f5d102f78d058fa5c032d37e0e7742..bd3e5cbc6240316dc5947f6f38dbc6a6c7492c51 100644 (file)
@@ -58,8 +58,6 @@ static int ehci_spear_drv_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
                ehci_spear_drv_resume);
 
-static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32);
-
 static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
 {
        struct usb_hcd *hcd ;
@@ -84,7 +82,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &spear_ehci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index e3eddc31ac83fd4f2065a5815967700fa6932ef7..59d111bf44a9d961b45fc85b99d659a8a67230b4 100644 (file)
@@ -637,8 +637,6 @@ static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable)
        writel(val, base + TEGRA_USB_PORTSC1);
 }
 
-static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
-
 static int tegra_ehci_probe(struct platform_device *pdev)
 {
        struct resource *res;
@@ -661,7 +659,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &tegra_ehci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        setup_vbus_gpio(pdev, pdata);
 
index 125e261f5bfca15be3d658f4cd976434c58b4912..2facee53eab16103af0a3abc0d67712f8ecc991f 100644 (file)
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
        int retval = 1;
        unsigned long flags;
 
-       /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+       /* if !PM_RUNTIME, root hub timers won't get shut down ... */
        if (!HC_IS_RUNNING(hcd->state))
                return 0;
 
index bbb791bd7617d6db99e064713ea95c527a83bfbf..a13709ee4e5d933bf4514df16296d66c7700ef17 100644 (file)
@@ -373,8 +373,10 @@ static int isp1760_plat_probe(struct platform_device *pdev)
        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!irq_res) {
                pr_warning("isp1760: IRQ resource not available\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto cleanup;
        }
+
        irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
 
        if (priv) {
index a0cb44f0e72420ad2c777f09818229a2a3f7c3c6..2ee1496dbc1d37c1f778d847712f78437abe21a4 100644 (file)
@@ -504,8 +504,6 @@ static const struct of_device_id at91_ohci_dt_ids[] = {
 
 MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
 
-static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32);
-
 static int ohci_at91_of_init(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -522,7 +520,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &at91_ohci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index 07592c00af26ba166ad75280296c5364b19af9f2..b0b542c14e3132a9f35eceb86ef1255b676e13ca 100644 (file)
@@ -98,8 +98,6 @@ static const struct hc_driver exynos_ohci_hc_driver = {
        .start_port_reset       = ohci_start_port_reset,
 };
 
-static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32);
-
 static int exynos_ohci_probe(struct platform_device *pdev)
 {
        struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
@@ -117,7 +115,7 @@ static int exynos_ohci_probe(struct platform_device *pdev)
         * Once we move to full device tree support this will vanish off.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &ohci_exynos_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
index 9e6de9586ae4bf82afb01f4c70464ffbbcd01117..fc627fd5411670369d89ba4aec7428b55e2fdef0 100644 (file)
@@ -233,14 +233,14 @@ static int ohci_urb_enqueue (
                        urb->start_frame = frame;
                }
        } else if (ed->type == PIPE_ISOCHRONOUS) {
-               u16     next = ohci_frame_no(ohci) + 2;
+               u16     next = ohci_frame_no(ohci) + 1;
                u16     frame = ed->last_iso + ed->interval;
 
                /* Behind the scheduling threshold? */
                if (unlikely(tick_before(frame, next))) {
 
                        /* USB_ISO_ASAP: Round up to the first available slot */
-                       if (urb->transfer_flags & URB_ISO_ASAP)
+                       if (urb->transfer_flags & URB_ISO_ASAP) {
                                frame += (next - frame + ed->interval - 1) &
                                                -ed->interval;
 
@@ -248,21 +248,25 @@ static int ohci_urb_enqueue (
                         * Not ASAP: Use the next slot in the stream.  If
                         * the entire URB falls before the threshold, fail.
                         */
-                       else if (tick_before(frame + ed->interval *
+                       } else {
+                               if (tick_before(frame + ed->interval *
                                        (urb->number_of_packets - 1), next)) {
-                               retval = -EXDEV;
-                               usb_hcd_unlink_urb_from_ep(hcd, urb);
-                               goto fail;
-                       }
+                                       retval = -EXDEV;
+                                       usb_hcd_unlink_urb_from_ep(hcd, urb);
+                                       goto fail;
+                               }
 
-                       /*
-                        * Some OHCI hardware doesn't handle late TDs
-                        * correctly.  After retiring them it proceeds to
-                        * the next ED instead of the next TD.  Therefore
-                        * we have to omit the late TDs entirely.
-                        */
-                       urb_priv->td_cnt = DIV_ROUND_UP(next - frame,
-                                       ed->interval);
+                               /*
+                                * Some OHCI hardware doesn't handle late TDs
+                                * correctly.  After retiring them it proceeds
+                                * to the next ED instead of the next TD.
+                                * Therefore we have to omit the late TDs
+                                * entirely.
+                                */
+                               urb_priv->td_cnt = DIV_ROUND_UP(
+                                               (u16) (next - frame),
+                                               ed->interval);
+                       }
                }
                urb->start_frame = frame;
        }
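Editor's note (not part of the commit): in the URB_ISO_ASAP branch above, frame += (next - frame + ed->interval - 1) & -ed->interval rounds the start frame up to the first slot in the stream's cadence at or after the scheduling threshold; the (x + n - 1) & -n form only works because OHCI periodic intervals are powers of two. A small self-contained illustration with hypothetical names:

    #include <assert.h>
    #include <stdint.h>

    /* Round `frame` up to the first frame >= `next` that stays on the stream's
     * interval boundary, assuming `interval` is a power of two. */
    static uint16_t demo_iso_round_up(uint16_t frame, uint16_t next, uint16_t interval)
    {
            return frame + (uint16_t)((uint16_t)(next - frame + interval - 1) & -interval);
    }

    int main(void)
    {
            /* next is 5 ticks past frame; with interval 8 the next slot is frame + 8 */
            assert(demo_iso_round_up(1000, 1005, 8) == 1008);
            /* already at the threshold: no adjustment needed */
            assert(demo_iso_round_up(1000, 1000, 8) == 1000);
            return 0;
    }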
index f4988fbe78e79988c59390b216f75b8c0ff8c8f7..5d7eb72c506403b6e3022f14b09aa47e2c8a97c1 100644 (file)
@@ -223,8 +223,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
 
        isp1301_i2c_client = isp1301_get_client(isp1301_node);
        if (!isp1301_i2c_client) {
-               ret = -EPROBE_DEFER;
-               goto out;
+               return -EPROBE_DEFER;
        }
 
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -234,7 +233,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (usb_disabled()) {
                dev_err(&pdev->dev, "USB is disabled\n");
                ret = -ENODEV;
-               goto out;
+               goto fail_disable;
        }
 
        /* Enable AHB slave USB clock, needed for further USB clock control */
@@ -245,19 +244,19 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (IS_ERR(usb_pll_clk)) {
                dev_err(&pdev->dev, "failed to acquire USB PLL\n");
                ret = PTR_ERR(usb_pll_clk);
-               goto out1;
+               goto fail_pll;
        }
 
        ret = clk_enable(usb_pll_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to start USB PLL\n");
-               goto out2;
+               goto fail_pllen;
        }
 
        ret = clk_set_rate(usb_pll_clk, 48000);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to set USB clock rate\n");
-               goto out3;
+               goto fail_rate;
        }
 
        /* Enable USB device clock */
@@ -265,13 +264,13 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (IS_ERR(usb_dev_clk)) {
                dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
                ret = PTR_ERR(usb_dev_clk);
-               goto out4;
+               goto fail_dev;
        }
 
        ret = clk_enable(usb_dev_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
-               goto out5;
+               goto fail_deven;
        }
 
        /* Enable USB otg clocks */
@@ -279,7 +278,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (IS_ERR(usb_otg_clk)) {
                dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
                ret = PTR_ERR(usb_otg_clk);
-               goto out6;
+               goto fail_otg;
        }
 
        __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
@@ -287,7 +286,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        ret = clk_enable(usb_otg_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
-               goto out7;
+               goto fail_otgen;
        }
 
        isp1301_configure();
@@ -296,20 +295,14 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (!hcd) {
                dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
                ret = -ENOMEM;
-               goto out8;
+               goto fail_hcd;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get MEM resource\n");
-               ret =  -ENOMEM;
-               goto out8;
-       }
-
        hcd->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(hcd->regs)) {
                ret = PTR_ERR(hcd->regs);
-               goto out8;
+               goto fail_resource;
        }
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
@@ -317,7 +310,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = -ENXIO;
-               goto out8;
+               goto fail_resource;
        }
 
        nxp_start_hc();
@@ -331,23 +324,24 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
                return ret;
 
        nxp_stop_hc();
-out8:
+fail_resource:
        usb_put_hcd(hcd);
-out7:
+fail_hcd:
        clk_disable(usb_otg_clk);
-out6:
+fail_otgen:
        clk_put(usb_otg_clk);
-out5:
+fail_otg:
        clk_disable(usb_dev_clk);
-out4:
+fail_deven:
        clk_put(usb_dev_clk);
-out3:
+fail_dev:
+fail_rate:
        clk_disable(usb_pll_clk);
-out2:
+fail_pllen:
        clk_put(usb_pll_clk);
-out1:
+fail_pll:
+fail_disable:
        isp1301_i2c_client = NULL;
-out:
        return ret;
 }
 
index ddfc31427bc09e1fc95c63ea40f006ad00631e66..8663851c8d8eac79b5d358d9e6f81f6116d5fc41 100644 (file)
@@ -114,8 +114,6 @@ static const struct hc_driver ohci_omap3_hc_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32);
-
 /*
  * configure so an HC device and id are always provided
  * always called with process context; sleeping is OK
@@ -168,8 +166,10 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &omap_ohci_dma_mask;
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       if (!dev->coherent_dma_mask)
+               dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
                        dev_name(dev));
index efe71f3ca4772068c557b3063f33cd247a0e19df..279b2ef1741149fe05deda764c09151763f56f1d 100644 (file)
@@ -282,8 +282,6 @@ static const struct of_device_id pxa_ohci_dt_ids[] = {
 
 MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
 
-static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32);
-
 static int ohci_pxa_of_init(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -298,7 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pxa_ohci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index 9020bf0e2eca13f8837aa46ab8c5ee55969c8b49..3e19e0170d1195711b4f235fd4be043fb234d65e 100644 (file)
@@ -91,8 +91,6 @@ static const struct hc_driver ohci_spear_hc_driver = {
        .start_port_reset       = ohci_start_port_reset,
 };
 
-static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32);
-
 static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
 {
        const struct hc_driver *driver = &ohci_spear_hc_driver;
@@ -114,7 +112,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &spear_ohci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index 4f0f0339532f7e2729fe90f6972301ec32116041..0f401dbfaf073bb59f7faad445a19663bded3a0e 100644 (file)
@@ -3084,7 +3084,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
        int ports, i, retval = 1;
        unsigned long flags;
 
-       /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+       /* if !PM_RUNTIME, root hub timers won't get shut down ... */
        if (!HC_IS_RUNNING(hcd->state))
                return 0;
 
index ad4483efb6d602cad2a841d8747b5983c16fc33c..b2ec7fe758ddcf3dba22f922493ca6cebdeaea10 100644 (file)
@@ -22,7 +22,7 @@
  * and usb-storage.
  *
  * TODO:
- * - usb suspend/resume triggered by sl811 (with USB_SUSPEND)
+ * - usb suspend/resume triggered by sl811 (with PM_RUNTIME)
  * - various issues noted in the code
  * - performance work; use both register banks; ...
  * - use urb->iso_frame_desc[] with ISO transfers
index f87bee6d2789256fc8c8dcdde71a6e4510108397..9189bc984c98ce9804d51ec94c4aebe4147b8f3f 100644 (file)
@@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
                /* auto-stop if nothing connected for 1 second */
                if (any_ports_active(uhci))
                        uhci->rh_state = UHCI_RH_RUNNING;
-               else if (time_after_eq(jiffies, uhci->auto_stop_time))
+               else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
+                               !uhci->wait_for_hp)
                        suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
                break;
 
index 8c4dace4b14a5c0455a79b389347cdfab640bd05..f1db61ada6a84c435aedc3967f04f8aab76c1e55 100644 (file)
@@ -60,8 +60,6 @@ static const struct hc_driver uhci_platform_hc_driver = {
        .hub_control =          uhci_hub_control,
 };
 
-static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32);
-
 static int uhci_hcd_platform_probe(struct platform_device *pdev)
 {
        struct usb_hcd *hcd;
@@ -78,7 +76,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &platform_uhci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
                        pdev->name);
index f0976d8190bc363b52d908540de8214fbeed43b8..041c6ddb695c8ec6fa17d371fe7904de2e47d5ab 100644 (file)
@@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                return -EINVAL;         /* Can't change the period */
 
        } else {
-               next = uhci->frame_number + 2;
+               next = uhci->frame_number + 1;
 
                /* Find the next unused frame */
                if (list_empty(&qh->queue)) {
index 965b539bc47410721c9021f41eb7bf051dcc8feb..fbf75e57628b72e0b7a74237d8a9610826b2ab93 100644 (file)
@@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
 
        /* Set the max packet size and max burst */
+       max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+       max_burst = 0;
        switch (udev->speed) {
        case USB_SPEED_SUPER:
-               max_packet = usb_endpoint_maxp(&ep->desc);
-               ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
                /* dig out max burst from ep companion desc */
-               max_packet = ep->ss_ep_comp.bMaxBurst;
-               ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
+               max_burst = ep->ss_ep_comp.bMaxBurst;
                break;
        case USB_SPEED_HIGH:
+               /* Some devices get this wrong */
+               if (usb_endpoint_xfer_bulk(&ep->desc))
+                       max_packet = 512;
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
@@ -1439,17 +1441,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (usb_endpoint_maxp(&ep->desc)
                                     & 0x1800) >> 11;
-                       ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
                }
-               /* Fall through */
+               break;
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
-               max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
-               ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
                break;
        default:
                BUG();
        }
+       ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
+                       MAX_BURST(max_burst));
        max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
        ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
 
@@ -1826,6 +1827,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
 
+       if (!xhci->rh_bw)
+               goto no_bw;
+
        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        for (i = 0; i < num_ports; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
@@ -1844,6 +1848,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
                }
        }
 
+no_bw:
        xhci->num_usb2_ports = 0;
        xhci->num_usb3_ports = 0;
        xhci->num_active_eps = 0;
@@ -2255,6 +2260,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        u32 page_size, temp;
        int i;
 
+       INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+       INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+
        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
        xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
        for (i = 0; i < 16; i++) {
@@ -2333,7 +2341,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
        if (!xhci->cmd_ring)
                goto fail;
-       INIT_LIST_HEAD(&xhci->cancel_cmd_list);
        xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
        xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
                        (unsigned long long)xhci->cmd_ring->first_seg->dma);
@@ -2444,8 +2451,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        if (xhci_setup_port_arrays(xhci, flags))
                goto fail;
 
-       INIT_LIST_HEAD(&xhci->lpm_failed_devs);
-
        /* Enable USB 3.0 device notifications for function remote wake, which
         * is necessary for allowing USB 3.0 devices to do remote wakeup from
         * U3 (device suspend).
index 1a30c380043ce258aa660e67edb38210707fffd6..cc24e39b97d5b8b2bdf7faada7f7767b4f8013c2 100644 (file)
@@ -221,6 +221,14 @@ static void xhci_pci_remove(struct pci_dev *dev)
 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
+
+       /*
+        * Systems with the TI redriver that loses port status change events
+        * need to have the registers polled during D3, so avoid D3cold.
+        */
+       if (xhci_compliance_mode_recovery_timer_quirk_check())
+               pdev->no_d3cold = true;
 
        return xhci_suspend(xhci);
 }
index b4aa79d154b28b29db4b2a6509e974b74fdd4192..d8f640b12dd9d950e842892858a617b7fa97247e 100644 (file)
@@ -466,7 +466,7 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
  * Systems:
  * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
  */
-static bool compliance_mode_recovery_timer_quirk_check(void)
+bool xhci_compliance_mode_recovery_timer_quirk_check(void)
 {
        const char *dmi_product_name, *dmi_sys_vendor;
 
@@ -517,7 +517,7 @@ int xhci_init(struct usb_hcd *hcd)
        xhci_dbg(xhci, "Finished xhci_init\n");
 
        /* Initializing Compliance Mode Recovery Data If Needed */
-       if (compliance_mode_recovery_timer_quirk_check()) {
+       if (xhci_compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }
@@ -956,6 +956,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
+       bool                    comp_timer_running = false;
 
        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
@@ -993,6 +994,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {
+
+               if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                               !(xhci_all_ports_seen_u0(xhci))) {
+                       del_timer_sync(&xhci->comp_mode_recovery_timer);
+                       xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
+               }
+
                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
@@ -1035,6 +1043,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
+               comp_timer_running = true;
+
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
@@ -1076,7 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
         * to suffer the Compliance Mode issue again. It doesn't matter if
         * ports have entered previously to U0 before system's suspension.
         */
-       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+       if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
                compliance_mode_recovery_timer_init(xhci);
 
        /* Re-enable port polling. */
index 29c978e37135a9c836808407907803f4770fadae..77600cefcaf1df6ed3209a41c2a00dcdbb31b4dc 100644 (file)
@@ -1853,4 +1853,7 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
 
+/* xHCI quirks */
+bool xhci_compliance_mode_recovery_timer_quirk_check(void);
+
 #endif /* __LINUX_XHCI_HCD_H */
index 3a18e44e9391850ab7f7ee7858cd6d134b59361e..e1b661d040217a3cdd0e76b325616f85adc04bd6 100644 (file)
@@ -560,6 +560,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
                if (!config) {
                        dev_err(&pdev->dev,
                                "failed to allocate musb hdrc config\n");
+                       ret = -ENOMEM;
                        goto err2;
                }
 
index 8914dec49f01cfc2e70a89d1e2de1c29a70ef08c..9d3044bdebe54c68ac7d506a2fa593f59287c4c0 100644 (file)
@@ -1232,7 +1232,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
        void __iomem            *mbase = musb->mregs;
        struct dma_channel      *dma;
        bool                    transfer_pending = false;
-       static bool use_sg;
 
        musb_ep_select(mbase, epnum);
        tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1463,9 +1462,9 @@ done:
         * NULL.
         */
        if (!urb->transfer_buffer)
-               use_sg = true;
+               qh->use_sg = true;
 
-       if (use_sg) {
+       if (qh->use_sg) {
                /* sg_miter_start is already done in musb_ep_program */
                if (!sg_miter_next(&qh->sg_miter)) {
                        dev_err(musb->controller, "error: sg list empty\n");
@@ -1484,9 +1483,9 @@ done:
 
        qh->segsize = length;
 
-       if (use_sg) {
+       if (qh->use_sg) {
                if (offset + length >= urb->transfer_buffer_length)
-                       use_sg = false;
+                       qh->use_sg = false;
        }
 
        musb_ep_select(mbase, epnum);
@@ -1552,7 +1551,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
        bool                    done = false;
        u32                     status;
        struct dma_channel      *dma;
-       static bool use_sg;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
 
        musb_ep_select(mbase, epnum);
@@ -1878,12 +1876,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                         * NULL.
                         */
                        if (!urb->transfer_buffer) {
-                               use_sg = true;
+                               qh->use_sg = true;
                                sg_miter_start(&qh->sg_miter, urb->sg, 1,
                                                sg_flags);
                        }
 
-                       if (use_sg) {
+                       if (qh->use_sg) {
                                if (!sg_miter_next(&qh->sg_miter)) {
                                        dev_err(musb->controller, "error: sg list empty\n");
                                        sg_miter_stop(&qh->sg_miter);
@@ -1913,8 +1911,8 @@ finish:
        urb->actual_length += xfer_len;
        qh->offset += xfer_len;
        if (done) {
-               if (use_sg)
-                       use_sg = false;
+               if (qh->use_sg)
+                       qh->use_sg = false;
 
                if (urb->status == -EINPROGRESS)
                        urb->status = status;
index 5a9c8feec10c8bc5814bec18a7018585c25be47a..738f7eb60df96d8aa78859720940af81c007cd3b 100644 (file)
@@ -74,6 +74,7 @@ struct musb_qh {
        u16                     frame;          /* for periodic schedule */
        unsigned                iso_idx;        /* in urb->iso_frame_desc[] */
        struct sg_mapping_iter sg_miter;        /* for highmem in PIO mode */
+       bool                    use_sg;         /* to track urb using sglist */
 };
 
 /* map from control or bulk queue head to the first qh on that ring */
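
The musb_host hunks above move the use_sg flag out of function-local statics, which were implicitly shared by every endpoint and every invocation, into struct musb_qh, so each queue head tracks its own scatter-gather state. A minimal sketch of the general pattern, using hypothetical demo_* names rather than the MUSB code itself:

    #include <linux/usb.h>

    /* Per-transfer state lives in the per-endpoint object, not in a
     * static local that all endpoints would share. */
    struct demo_qh {
            bool use_sg;    /* urb is currently being fed from its sg list */
    };

    static void demo_start(struct demo_qh *qh, struct urb *urb)
    {
            /* PIO path: no linear buffer, so walk the scatterlist. */
            if (!urb->transfer_buffer)
                    qh->use_sg = true;
    }

    static void demo_complete(struct demo_qh *qh)
    {
            qh->use_sg = false;     /* resets only this endpoint's flag */
    }
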
index 3551f1a30c655efdc2977556e9e4f6c540ea115a..628b93fe5cccf585d21759fa112086a345a2e92a 100644 (file)
@@ -549,7 +549,8 @@ static int omap2430_probe(struct platform_device *pdev)
                glue->control_otghs = omap_get_control_dev();
                if (IS_ERR(glue->control_otghs)) {
                        dev_vdbg(&pdev->dev, "Failed to get control device\n");
-                       return -ENODEV;
+                       ret = PTR_ERR(glue->control_otghs);
+                       goto err2;
                }
        } else {
                glue->control_otghs = ERR_PTR(-ENODEV);
index 371d0e74e9094132eb378366858dedd7e654d89f..7ef3eb8617a6c7adc7775bc76a5fb0fd3d699ef8 100644 (file)
@@ -25,7 +25,7 @@ config AB8500_USB
 
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
-       depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
+       depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
        select USB_OTG
        help
          Enable this to support Freescale USB OTG transceiver.
@@ -139,7 +139,6 @@ config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
        depends on USB || USB_GADGET
        depends on I2C
-       select USB_OTG_UTILS
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
          This chip is typically used as USB transceiver for USB host, gadget
@@ -162,7 +161,7 @@ config USB_MSM_OTG
 
 config USB_MV_OTG
        tristate "Marvell USB OTG support"
-       depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND
+       depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME
        select USB_OTG
        help
          Say Y here if you want to build Marvell USB OTG transceiver
index 4acef26a2ef5c2daa5b7a163e2afd297903e9642..e5eb1b5a04ebb815ffd46be5c1ab48b2fb8ed193 100644 (file)
@@ -892,8 +892,6 @@ static int ab8500_usb_remove(struct platform_device *pdev)
        else if (ab->mode == USB_PERIPHERAL)
                ab8500_usb_peri_phy_dis(ab);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 97b9308507c3dae718828645a79069c6af2de46e..e771bafb9f1da3375f9f0492493c973fe0862484 100644 (file)
@@ -799,6 +799,7 @@ static int fsl_otg_conf(struct platform_device *pdev)
 
        /* initialize the otg structure */
        fsl_otg_tc->phy.label = DRIVER_DESC;
+       fsl_otg_tc->phy.dev = &pdev->dev;
        fsl_otg_tc->phy.set_power = fsl_otg_set_power;
 
        fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy;
index 4c76074e518d56f60c192f560dfc7a3d049de301..8443335c2ea0260414ea183fe69c809b5b328d64 100644 (file)
@@ -266,6 +266,7 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, gpio_vbus);
        gpio_vbus->dev = &pdev->dev;
        gpio_vbus->phy.label = "gpio-vbus";
+       gpio_vbus->phy.dev = gpio_vbus->dev;
        gpio_vbus->phy.set_power = gpio_vbus_set_power;
        gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
        gpio_vbus->phy.state = OTG_STATE_UNDEFINED;
@@ -343,7 +344,6 @@ err_irq:
                gpio_free(pdata->gpio_pullup);
        gpio_free(pdata->gpio_vbus);
 err_gpio:
-       platform_set_drvdata(pdev, NULL);
        kfree(gpio_vbus->phy.otg);
        kfree(gpio_vbus);
        return err;
@@ -365,7 +365,6 @@ static int __exit gpio_vbus_remove(struct platform_device *pdev)
        if (gpio_is_valid(pdata->gpio_pullup))
                gpio_free(pdata->gpio_pullup);
        gpio_free(gpio);
-       platform_set_drvdata(pdev, NULL);
        kfree(gpio_vbus->phy.otg);
        kfree(gpio_vbus);
 
index 225ae6c97eeb4758945cc08b558bdd27ddc7822d..8a55b37d1a024687f6be8078ef4f54f60cdc5111 100644 (file)
@@ -102,6 +102,7 @@ static int isp1301_probe(struct i2c_client *client,
        mutex_init(&isp->mutex);
 
        phy = &isp->phy;
+       phy->dev = &client->dev;
        phy->label = DRV_NAME;
        phy->init = isp1301_phy_init;
        phy->set_vbus = isp1301_phy_set_vbus;
index f7838a43347c64d855fe396c7da3ec2c81246a7b..1568ea63e3380766be04a0491f891e5471bf9ba9 100644 (file)
@@ -278,11 +278,6 @@ static int mv_u3d_phy_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "missing mem resource\n");
-               return -ENODEV;
-       }
-
        phy_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(phy_base))
                return PTR_ERR(phy_base);
index c987bbe278519c04bfc2e390dd59dd0ba6a8be60..4a6b03c738765814adc2eefe6bbbdef6eba8bf0b 100644 (file)
@@ -667,7 +667,6 @@ int mv_otg_remove(struct platform_device *pdev)
        mv_otg_disable(mvotg);
 
        usb_remove_phy(&mvotg->phy);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
@@ -850,8 +849,6 @@ err_destroy_workqueue:
        flush_workqueue(mvotg->qwork);
        destroy_workqueue(mvotg->qwork);
 
-       platform_set_drvdata(pdev, NULL);
-
        return retval;
 }
 
index 9d4381e64d5126d086af1cbca806519e4813f602..bd601c537c8d434a3e33a7aa59d88d583472244f 100644 (file)
@@ -130,11 +130,6 @@ static int mxs_phy_probe(struct platform_device *pdev)
        int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get device resources\n");
-               return -ENOENT;
-       }
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
@@ -160,6 +155,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
        mxs_phy->phy.set_suspend        = mxs_phy_suspend;
        mxs_phy->phy.notify_connect     = mxs_phy_on_connect;
        mxs_phy->phy.notify_disconnect  = mxs_phy_on_disconnect;
+       mxs_phy->phy.type               = USB_PHY_TYPE_USB2;
 
        ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier);
 
@@ -180,8 +176,6 @@ static int mxs_phy_remove(struct platform_device *pdev)
 
        usb_remove_phy(&mxs_phy->phy);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 2b10cc969bbb1f4457a463c2072508953a42fdc2..638cc5dade35227732e77e6c5ccb0d769eb47d8b 100644 (file)
@@ -254,8 +254,6 @@ static int nop_usb_xceiv_remove(struct platform_device *pdev)
 
        usb_remove_phy(&nop->phy);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 45ffe036dacc5acc455b8d3b22de6df0dfe7bef7..9d5e273abcc7652f1b3dbe17992720a170124cdf 100644 (file)
@@ -363,11 +363,6 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
        int ret;
 
        phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!phy_mem) {
-               dev_err(dev, "%s: missing mem resource\n", __func__);
-               return -ENODEV;
-       }
-
        phy_base = devm_ioremap_resource(dev, phy_mem);
        if (IS_ERR(phy_base))
                return PTR_ERR(phy_base);
index 133f3d0c554f9e7ed8979c1198f854db08def9ef..5a9efcbcb532cc7ecf7d118b0767db0b970b7960 100644 (file)
@@ -239,11 +239,6 @@ static int samsung_usb3phy_probe(struct platform_device *pdev)
        int ret;
 
        phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!phy_mem) {
-               dev_err(dev, "%s: missing mem resource\n", __func__);
-               return -ENODEV;
-       }
-
        phy_base = devm_ioremap_resource(dev, phy_mem);
        if (IS_ERR(phy_base))
                return PTR_ERR(phy_base);
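
The PHY probe() hunks above (mv_u3d, mxs-phy, samsung-usbphy) drop their explicit platform_get_resource() NULL checks because devm_ioremap_resource() already validates the resource, logs an error, and returns an ERR_PTR on failure. A minimal sketch of the resulting idiom, with a hypothetical demo_probe():

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            /* devm_ioremap_resource() copes with res == NULL itself, so
             * the caller only needs to handle the ERR_PTR result. */
            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            return 0;
    }
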
index 3b16118cbf62d7d8be032f6a243a39fd5965a5be..40e7fd94646f4bfbad247c486bc8659dad6d8f0a 100644 (file)
@@ -43,7 +43,7 @@
 #define DRIVER_NAME "ark3116"
 
 /* usb timeout of 1 second */
-#define ARK_TIMEOUT (1*HZ)
+#define ARK_TIMEOUT 1000
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x6547, 0x0232) },
index d341555d37d8bbe85f719d782049806522d2a066..082120198f870b2104d5230a5f1274803bc1074f 100644 (file)
@@ -65,6 +65,7 @@ static const struct usb_device_id id_table_earthmate[] = {
 static const struct usb_device_id id_table_cyphidcomrs232[] = {
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+       { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
        { }                                             /* Terminating entry */
 };
 
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+       { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
        { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
        { }                                             /* Terminating entry */
 };
@@ -229,6 +231,12 @@ static struct usb_serial_driver * const serial_drivers[] = {
  * Cypress serial helper functions
  *****************************************************************************/
 
+/* FRWD Dongle hidcom needs to skip reset and speed checks */
+static inline bool is_frwd(struct usb_device *dev)
+{
+       return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
+               (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
+}
 
 static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
 {
@@ -238,6 +246,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
        if (unstable_bauds)
                return new_rate;
 
+       /* FRWD Dongle uses 115200 bps */
+       if (is_frwd(port->serial->dev))
+               return new_rate;
+
        /*
         * The general purpose firmware for the Cypress M8 allows for
         * a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -448,7 +460,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
        }
 
-       usb_reset_configuration(serial->dev);
+       /* Skip reset for FRWD device. It is a workaround:
+          device hangs if it receives SET_CONFIGURE in Configured
+          state. */
+       if (!is_frwd(serial->dev))
+               usb_reset_configuration(serial->dev);
 
        priv->cmd_ctrl = 0;
        priv->line_control = 0;
index 67cf60826884fbb3c9eaebaa7ba621e0e22ad947..b461311a2ae71d7ddfd7695e6bb8801daa6354aa 100644 (file)
 #define VENDOR_ID_CYPRESS              0x04b4
 #define PRODUCT_ID_CYPHIDCOM           0x5500
 
+/* FRWD Dongle - a GPS sports watch */
+#define VENDOR_ID_FRWD                 0x6737
+#define PRODUCT_ID_CYPHIDCOM_FRWD      0x0001
+
 /* Powercom UPS, chip CY7C63723 */
 #define VENDOR_ID_POWERCOM             0x0d9f
 #define PRODUCT_ID_UPS                 0x0002
index 090b411d893f8b957d816eb6b45bd599948ea055..7d8dd5aad236ee8f52ac761eda06c54c5f9539c3 100644 (file)
@@ -165,11 +165,12 @@ static void f81232_set_termios(struct tty_struct *tty,
        /* FIXME - Stubbed out for now */
 
        /* Don't change anything if nothing has changed */
-       if (!tty_termios_hw_change(&tty->termios, old_termios))
+       if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
                return;
 
        /* Do the real work here... */
-       tty_termios_copy_hw(&tty->termios, old_termios);
+       if (old_termios)
+               tty_termios_copy_hw(&tty->termios, old_termios);
 }
 
 static int f81232_tiocmget(struct tty_struct *tty)
@@ -187,12 +188,11 @@ static int f81232_tiocmset(struct tty_struct *tty,
 
 static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-       struct ktermios tmp_termios;
        int result;
 
        /* Setup termios */
        if (tty)
-               f81232_set_termios(tty, port, &tmp_termios);
+               f81232_set_termios(tty, port, NULL);
 
        result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
        if (result) {
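
The f81232 hunks above, together with the pl2303 and spcp8x5 changes later in this diff, stop passing an uninitialized on-stack ktermios from open(); open() now passes NULL, and set_termios() treats a NULL old_termios as the initial setup. A minimal sketch of the convention, with hypothetical demo_* names:

    #include <linux/tty.h>
    #include <linux/usb/serial.h>

    static void demo_set_termios(struct tty_struct *tty,
                                 struct usb_serial_port *port,
                                 struct ktermios *old_termios)
    {
            /* NULL old_termios means "first-time setup": skip the
             * no-change short-circuit and program everything. */
            if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
                    return;

            /* ... program baud rate, frame format and flow control ... */
    }

    static int demo_open(struct tty_struct *tty, struct usb_serial_port *port)
    {
            if (tty)
                    demo_set_termios(tty, port, NULL);

            return usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
    }
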
index 242b5776648ab31c1e820bcc82dad0a48c629775..7260ec66034715e7a065d0f5fe6441f49a37f6b4 100644 (file)
@@ -189,6 +189,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
        { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+       { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
+       { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -924,8 +926,8 @@ static int  ftdi_tiocmset(struct tty_struct *tty,
 static int  ftdi_ioctl(struct tty_struct *tty,
                        unsigned int cmd, unsigned long arg);
 static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static int ftdi_chars_in_buffer(struct tty_struct *tty);
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static bool ftdi_tx_empty(struct usb_serial_port *port);
+static int ftdi_get_modem_status(struct usb_serial_port *port,
                                                unsigned char status[2]);
 
 static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
@@ -961,7 +963,7 @@ static struct usb_serial_driver ftdi_sio_device = {
        .ioctl =                ftdi_ioctl,
        .set_termios =          ftdi_set_termios,
        .break_ctl =            ftdi_break_ctl,
-       .chars_in_buffer =      ftdi_chars_in_buffer,
+       .tx_empty =             ftdi_tx_empty,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
@@ -2056,27 +2058,18 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
 
 }
 
-static int ftdi_chars_in_buffer(struct tty_struct *tty)
+static bool ftdi_tx_empty(struct usb_serial_port *port)
 {
-       struct usb_serial_port *port = tty->driver_data;
-       int chars;
        unsigned char buf[2];
        int ret;
 
-       chars = usb_serial_generic_chars_in_buffer(tty);
-       if (chars)
-               goto out;
-
-       /* Check if hardware buffer is empty. */
-       ret = ftdi_get_modem_status(tty, buf);
+       ret = ftdi_get_modem_status(port, buf);
        if (ret == 2) {
                if (!(buf[1] & FTDI_RS_TEMT))
-                       chars = 1;
+                       return false;
        }
-out:
-       dev_dbg(&port->dev, "%s - %d\n", __func__, chars);
 
-       return chars;
+       return true;
 }
 
 /* old_termios contains the original termios settings and tty->termios contains
@@ -2268,10 +2261,9 @@ no_c_cflag_changes:
  * Returns the number of status bytes retrieved (device dependent), or
  * negative error code.
  */
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static int ftdi_get_modem_status(struct usb_serial_port *port,
                                                unsigned char status[2])
 {
-       struct usb_serial_port *port = tty->driver_data;
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        unsigned char *buf;
        int len;
@@ -2336,7 +2328,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
        unsigned char buf[2];
        int ret;
 
-       ret = ftdi_get_modem_status(tty, buf);
+       ret = ftdi_get_modem_status(port, buf);
        if (ret < 0)
                return ret;
 
index 98528270c43c21f5f8d66ccf594a269b32f0600f..6dd79253205dd7ec54278ddb078f08346cdbb4f8 100644 (file)
  */
 #define NEWPORT_VID                    0x104D
 #define NEWPORT_AGILIS_PID             0x3000
+#define NEWPORT_CONEX_CC_PID           0x3002
+#define NEWPORT_CONEX_AGP_PID          0x3006
 
 /* Interbiometrics USB I/O Board */
 /* Developed for Interbiometrics by Rudolf Gugler */
index 297665fdd16d31993a5e95cddd313626453be6c7..ba45170c78e5f601ec6b2c57f5f05600247803f4 100644 (file)
@@ -253,6 +253,37 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
 }
 EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
 
+void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+{
+       struct usb_serial_port *port = tty->driver_data;
+       unsigned int bps;
+       unsigned long period;
+       unsigned long expire;
+
+       bps = tty_get_baud_rate(tty);
+       if (!bps)
+               bps = 9600;     /* B0 */
+       /*
+        * Use a poll-period of roughly the time it takes to send one
+        * character or at least one jiffy.
+        */
+       period = max_t(unsigned long, (10 * HZ / bps), 1);
+       period = min_t(unsigned long, period, timeout);
+
+       dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+                                       __func__, jiffies_to_msecs(timeout),
+                                       jiffies_to_msecs(period));
+       expire = jiffies + timeout;
+       while (!port->serial->type->tx_empty(port)) {
+               schedule_timeout_interruptible(period);
+               if (signal_pending(current))
+                       break;
+               if (time_after(jiffies, expire))
+                       break;
+       }
+}
+EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
+
 static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
                                                int index, gfp_t mem_flags)
 {
index 158bf4bc29cc2b6f60c7211ad0e9f8b75186bf04..1be6ba7bee27452ac55c08a06d98ff2394a2f7fb 100644 (file)
@@ -2019,8 +2019,6 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
        struct edgeport_port *edge_port = usb_get_serial_port_data(port);
        int chars = 0;
        unsigned long flags;
-       int ret;
-
        if (edge_port == NULL)
                return 0;
 
@@ -2028,16 +2026,22 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
        chars = kfifo_len(&edge_port->write_fifo);
        spin_unlock_irqrestore(&edge_port->ep_lock, flags);
 
-       if (!chars) {
-               ret = tx_active(edge_port);
-               if (ret > 0)
-                       chars = ret;
-       }
-
        dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
        return chars;
 }
 
+static bool edge_tx_empty(struct usb_serial_port *port)
+{
+       struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+       int ret;
+
+       ret = tx_active(edge_port);
+       if (ret > 0)
+               return false;
+
+       return true;
+}
+
 static void edge_throttle(struct tty_struct *tty)
 {
        struct usb_serial_port *port = tty->driver_data;
@@ -2557,6 +2561,7 @@ static struct usb_serial_driver edgeport_1port_device = {
        .write                  = edge_write,
        .write_room             = edge_write_room,
        .chars_in_buffer        = edge_chars_in_buffer,
+       .tx_empty               = edge_tx_empty,
        .break_ctl              = edge_break,
        .read_int_callback      = edge_interrupt_callback,
        .read_bulk_callback     = edge_bulk_in_callback,
@@ -2589,6 +2594,7 @@ static struct usb_serial_driver edgeport_2port_device = {
        .write                  = edge_write,
        .write_room             = edge_write_room,
        .chars_in_buffer        = edge_chars_in_buffer,
+       .tx_empty               = edge_tx_empty,
        .break_ctl              = edge_break,
        .read_int_callback      = edge_interrupt_callback,
        .read_bulk_callback     = edge_bulk_in_callback,
index 9d74c278b7b58d5f9ce6878d35d5ff66d869a5cf..790673e5faa744de7da4dfcb38fa2ba11b418ea6 100644 (file)
@@ -287,7 +287,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
            usb_bulk_msg(serial->dev,
                         usb_sndbulkpipe(serial->dev,
                                         port->bulk_out_endpointAddress), buf,
-                        count, &actual, HZ * 1);
+                        count, &actual, 1000);
 
        if (status != IUU_OPERATION_OK)
                dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
@@ -307,7 +307,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
            usb_bulk_msg(serial->dev,
                         usb_rcvbulkpipe(serial->dev,
                                         port->bulk_in_endpointAddress), buf,
-                        count, &actual, HZ * 1);
+                        count, &actual, 1000);
 
        if (status != IUU_OPERATION_OK)
                dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
index eb30d7b01f3681bd00c7fe73827533a613f162b6..3549d073df229617690f90d29b6c2e1f77648b64 100644 (file)
@@ -1548,7 +1548,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        struct keyspan_serial_private           *s_priv;
        struct keyspan_port_private             *p_priv;
        const struct keyspan_device_details     *d_details;
-       int                                     outcont_urb;
        struct urb                              *this_urb;
        int                                     device_port, err;
 
@@ -1559,7 +1558,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        d_details = s_priv->device_details;
        device_port = port->number - port->serial->minor;
 
-       outcont_urb = d_details->outcont_endpoints[port->number];
        this_urb = p_priv->outcont_urb;
 
        dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe));
@@ -1685,14 +1683,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        err = usb_submit_urb(this_urb, GFP_ATOMIC);
        if (err != 0)
                dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
-#if 0
-       else {
-               dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__
-                       outcont_urb, this_urb->transfer_buffer_length,
-                       usb_pipeendpoint(this_urb->pipe));
-       }
-#endif
-
        return 0;
 }
 
index cc0e54345df98c03621949fab2bdc91b7a90af3e..f27c621a9297f3c896b84c9ea700d6285ef6b94f 100644 (file)
@@ -40,7 +40,7 @@
 #define DRIVER_DESC "Moschip USB Serial Driver"
 
 /* default urb timeout */
-#define MOS_WDR_TIMEOUT        (HZ * 5)
+#define MOS_WDR_TIMEOUT        5000
 
 #define MOS_MAX_PORT   0x02
 #define MOS_WRITE      0x0E
@@ -227,11 +227,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
        __u8 requesttype = (__u8)0xc0;
        __u16 index = get_reg_index(reg);
        __u16 value = get_reg_value(reg, serial_portnum);
-       int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
-                                    index, data, 1, MOS_WDR_TIMEOUT);
-       if (status < 0)
+       u8 *buf;
+       int status;
+
+       buf = kmalloc(1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+                                    index, buf, 1, MOS_WDR_TIMEOUT);
+       if (status == 1)
+               *data = *buf;
+       else if (status < 0)
                dev_err(&usbdev->dev,
                        "mos7720: usb_control_msg() failed: %d", status);
+       kfree(buf);
+
        return status;
 }
 
@@ -1618,7 +1629,7 @@ static void change_port_settings(struct tty_struct *tty,
                mos7720_port->shadowMCR |= (UART_MCR_XONANY);
                /* To set hardware flow control to the specified *
                 * serial port, in SP1/2_CONTROL_REG             */
-               if (port->number)
+               if (port_number)
                        write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
                else
                        write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
@@ -1927,7 +1938,7 @@ static int mos7720_startup(struct usb_serial *serial)
 
        /* setting configuration feature to one */
        usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
-                       (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
+                       (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
 
        /* start the interrupt urb */
        ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
@@ -1970,7 +1981,7 @@ static void mos7720_release(struct usb_serial *serial)
                /* wait for synchronous usb calls to return */
                if (mos_parport->msg_pending)
                        wait_for_completion_timeout(&mos_parport->syncmsg_compl,
-                                                   MOS_WDR_TIMEOUT);
+                                           msecs_to_jiffies(MOS_WDR_TIMEOUT));
 
                parport_remove_port(mos_parport->pp);
                usb_set_serial_data(serial, NULL);
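
The read_mos_reg() rework above, and the matching mos7840 hunks that follow, replace stack variables with a kmalloc()'d byte for usb_control_msg(): USB transfer buffers must be DMA-able heap memory, not stack or static storage. A minimal sketch of the pattern, with a hypothetical demo_read_reg() and placeholder request values:

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int demo_read_reg(struct usb_device *udev, u8 request,
                             u16 value, u16 index, u8 *data)
    {
            u8 *buf;
            int status;

            buf = kmalloc(1, GFP_KERNEL);   /* DMA-safe, unlike a stack byte */
            if (!buf)
                    return -ENOMEM;

            status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                                     USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                     value, index, buf, 1, 5000);
            if (status == 1)
                    *data = *buf;

            kfree(buf);
            return status;
    }
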
index a0d5ea5459823fbd528d83f5676120a8c456eca0..7e998081e1cd9b42651143e7f01e63286410393d 100644 (file)
@@ -2142,13 +2142,21 @@ static int mos7840_ioctl(struct tty_struct *tty,
 static int mos7810_check(struct usb_serial *serial)
 {
        int i, pass_count = 0;
+       u8 *buf;
        __u16 data = 0, mcr_data = 0;
        __u16 test_pattern = 0x55AA;
+       int res;
+
+       buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+       if (!buf)
+               return 0;       /* failed to identify 7810 */
 
        /* Store MCR setting */
-       usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+       res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
-               &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+       if (res == VENDOR_READ_LENGTH)
+               mcr_data = *buf;
 
        for (i = 0; i < 16; i++) {
                /* Send the 1-bit test pattern out to MCS7810 test pin */
@@ -2158,9 +2166,12 @@ static int mos7810_check(struct usb_serial *serial)
                        MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
 
                /* Read the test pattern back */
-               usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
-                       MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
-                       VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               res = usb_control_msg(serial->dev,
+                               usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
+                               MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+                               VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               if (res == VENDOR_READ_LENGTH)
+                       data = *buf;
 
                /* If this is a MCS7810 device, both test patterns must match */
                if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
@@ -2174,6 +2185,8 @@ static int mos7810_check(struct usb_serial *serial)
                MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
                0, MOS_WDR_TIMEOUT);
 
+       kfree(buf);
+
        if (pass_count == 16)
                return 1;
 
@@ -2183,11 +2196,17 @@ static int mos7810_check(struct usb_serial *serial)
 static int mos7840_calc_num_ports(struct usb_serial *serial)
 {
        __u16 data = 0x00;
+       u8 *buf;
        int mos7840_num_ports;
 
-       usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
-               MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
-               VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+       buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+       if (buf) {
+               usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+                       MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+                       VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               data = *buf;
+               kfree(buf);
+       }
 
        if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
                serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
index 734372846abbb31b1381841f4f144a09a4f0b2a0..bd4323ddae1aa19002b92998d52d00b9bd13a7cf 100644 (file)
@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
 
 #define DELL_PRODUCT_5800_MINICARD_VZW         0x8195  /* Novatel E362 */
 #define DELL_PRODUCT_5800_V2_MINICARD_VZW      0x8196  /* Novatel E362 */
+#define DELL_PRODUCT_5804_MINICARD_ATT         0x819b  /* Novatel E371 */
 
 #define KYOCERA_VENDOR_ID                      0x0c88
 #define KYOCERA_PRODUCT_KPC650                 0x17da
@@ -249,13 +250,7 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF622                      0x0001
 #define ZTE_PRODUCT_MF628                      0x0015
 #define ZTE_PRODUCT_MF626                      0x0031
-#define ZTE_PRODUCT_CDMA_TECH                  0xfffe
-#define ZTE_PRODUCT_AC8710                     0xfff1
-#define ZTE_PRODUCT_AC2726                     0xfff5
-#define ZTE_PRODUCT_AC8710T                    0xffff
 #define ZTE_PRODUCT_MC2718                     0xffe8
-#define ZTE_PRODUCT_AD3812                     0xffeb
-#define ZTE_PRODUCT_MC2716                     0xffed
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -341,8 +336,8 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_EU3_E                        0x0051
 #define CINTERION_PRODUCT_EU3_P                        0x0052
 #define CINTERION_PRODUCT_PH8                  0x0053
-#define CINTERION_PRODUCT_AH                 0x0055
-#define CINTERION_PRODUCT_PLS8                 0x0060
+#define CINTERION_PRODUCT_AHXX                 0x0055
+#define CINTERION_PRODUCT_PLXX                 0x0060
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -494,18 +489,10 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
        .reserved = BIT(4),
 };
 
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
-       .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
 static const struct option_blacklist_info zte_mc2718_z_blacklist = {
        .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
 };
 
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
-       .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
@@ -592,6 +579,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff),    /* Huawei E1820 */
+               .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
@@ -771,6 +760,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) },         /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
        { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },   /* ADU-E100, ADU-310 */
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -795,7 +785,6 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
@@ -966,6 +955,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
@@ -1195,16 +1186,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
 
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+       /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
         .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
-        .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
-        .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1264,8 +1248,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
-       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
-       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
index 7151659367a0898cec3124e8ee328bf17f57d6bd..048cd44d51b189352aea40e4275ebb0124e49344 100644 (file)
@@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
           serial settings even to the same values as before. Thus
           we actually need to filter in this specific case */
 
-       if (!tty_termios_hw_change(&tty->termios, old_termios))
+       if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
                return;
 
        cflag = tty->termios.c_cflag;
@@ -293,7 +293,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
        if (!buf) {
                dev_err(&port->dev, "%s - out of memory.\n", __func__);
                /* Report back no change occurred */
-               tty->termios = *old_termios;
+               if (old_termios)
+                       tty->termios = *old_termios;
                return;
        }
 
@@ -433,7 +434,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
        control = priv->line_control;
        if ((cflag & CBAUD) == B0)
                priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
-       else if ((old_termios->c_cflag & CBAUD) == B0)
+       else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
                priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
        if (control != priv->line_control) {
                control = priv->line_control;
@@ -492,7 +493,6 @@ static void pl2303_close(struct usb_serial_port *port)
 
 static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-       struct ktermios tmp_termios;
        struct usb_serial *serial = port->serial;
        struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
        int result;
@@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
 
        /* Setup termios */
        if (tty)
-               pl2303_set_termios(tty, port, &tmp_termios);
+               pl2303_set_termios(tty, port, NULL);
 
        result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
        if (result) {
index 59b32b7821264d6d02b34fe4df881496ebf271b2..bd794b43898cce03ef4ac6fb2c71ecc6563bc4c0 100644 (file)
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1199, 0x901b)},   /* Sierra Wireless MC7770 */
        {USB_DEVICE(0x12D1, 0x14F0)},   /* Sony Gobi 3000 QDL */
        {USB_DEVICE(0x12D1, 0x14F1)},   /* Sony Gobi 3000 Composite */
+       {USB_DEVICE(0x0AF0, 0x8120)},   /* Option GTM681W */
 
        /* non Gobi Qualcomm serial devices */
        {USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)},       /* Sierra Wireless MC7700 Device Management */
index cf3df793c2b7a1a7781e908e0a5d2385e7056f5b..ddf6c47137dc4e557630b70156ad146f82d441c7 100644 (file)
@@ -291,7 +291,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
        struct spcp8x5_private *priv = usb_get_serial_port_data(port);
        unsigned long flags;
        unsigned int cflag = tty->termios.c_cflag;
-       unsigned int old_cflag = old_termios->c_cflag;
        unsigned short uartdata;
        unsigned char buf[2] = {0, 0};
        int baud;
@@ -299,15 +298,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
        u8 control;
 
        /* check that they really want us to change something */
-       if (!tty_termios_hw_change(&tty->termios, old_termios))
+       if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
                return;
 
        /* set DTR/RTS active */
        spin_lock_irqsave(&priv->lock, flags);
        control = priv->line_control;
-       if ((old_cflag & CBAUD) == B0) {
+       if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
                priv->line_control |= MCR_DTR;
-               if (!(old_cflag & CRTSCTS))
+               if (!(old_termios->c_cflag & CRTSCTS))
                        priv->line_control |= MCR_RTS;
        }
        if (control != priv->line_control) {
@@ -394,7 +393,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
 
 static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-       struct ktermios tmp_termios;
        struct usb_serial *serial = port->serial;
        struct spcp8x5_private *priv = usb_get_serial_port_data(port);
        int ret;
@@ -411,7 +409,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
        spcp8x5_set_ctrl_line(port, priv->line_control);
 
        if (tty)
-               spcp8x5_set_termios(tty, port, &tmp_termios);
+               spcp8x5_set_termios(tty, port, NULL);
 
        port->port.drain_delay = 256;
 
index cac47aef2918972ead4aeb01a43bd9166daec0ca..c92c5ed4e580ec6761d0acabc379cfe667faaeb1 100644 (file)
@@ -101,6 +101,7 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
                const unsigned char *data, int count);
 static int ti_write_room(struct tty_struct *tty);
 static int ti_chars_in_buffer(struct tty_struct *tty);
+static bool ti_tx_empty(struct usb_serial_port *port);
 static void ti_throttle(struct tty_struct *tty);
 static void ti_unthrottle(struct tty_struct *tty);
 static int ti_ioctl(struct tty_struct *tty,
@@ -222,6 +223,7 @@ static struct usb_serial_driver ti_1port_device = {
        .write                  = ti_write,
        .write_room             = ti_write_room,
        .chars_in_buffer        = ti_chars_in_buffer,
+       .tx_empty               = ti_tx_empty,
        .throttle               = ti_throttle,
        .unthrottle             = ti_unthrottle,
        .ioctl                  = ti_ioctl,
@@ -253,6 +255,7 @@ static struct usb_serial_driver ti_2port_device = {
        .write                  = ti_write,
        .write_room             = ti_write_room,
        .chars_in_buffer        = ti_chars_in_buffer,
+       .tx_empty               = ti_tx_empty,
        .throttle               = ti_throttle,
        .unthrottle             = ti_unthrottle,
        .ioctl                  = ti_ioctl,
@@ -684,8 +687,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
        struct ti_port *tport = usb_get_serial_port_data(port);
        int chars = 0;
        unsigned long flags;
-       int ret;
-       u8 lsr;
 
        if (tport == NULL)
                return 0;
@@ -694,16 +695,22 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
        chars = kfifo_len(&tport->write_fifo);
        spin_unlock_irqrestore(&tport->tp_lock, flags);
 
-       if (!chars) {
-               ret = ti_get_lsr(tport, &lsr);
-               if (!ret && !(lsr & TI_LSR_TX_EMPTY))
-                       chars = 1;
-       }
-
        dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
        return chars;
 }
 
+static bool ti_tx_empty(struct usb_serial_port *port)
+{
+       struct ti_port *tport = usb_get_serial_port_data(port);
+       int ret;
+       u8 lsr;
+
+       ret = ti_get_lsr(tport, &lsr);
+       if (!ret && !(lsr & TI_LSR_TX_EMPTY))
+               return false;
+
+       return true;
+}
 
 static void ti_throttle(struct tty_struct *tty)
 {
index cf75beb1251bc85b3c709b989e1ddd5220094c7c..5f6b1ff9d29e6c4166213d04263b83aca0a51a52 100644 (file)
@@ -359,20 +359,29 @@ static int serial_chars_in_buffer(struct tty_struct *tty)
 {
        struct usb_serial_port *port = tty->driver_data;
        struct usb_serial *serial = port->serial;
-       int count = 0;
 
        dev_dbg(tty->dev, "%s\n", __func__);
 
-       mutex_lock(&serial->disc_mutex);
-       /* if the device was unplugged then any remaining characters
-          fell out of the connector ;) */
        if (serial->disconnected)
-               count = 0;
-       else
-               count = serial->type->chars_in_buffer(tty);
-       mutex_unlock(&serial->disc_mutex);
+               return 0;
 
-       return count;
+       return serial->type->chars_in_buffer(tty);
+}
+
+static void serial_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+       struct usb_serial_port *port = tty->driver_data;
+       struct usb_serial *serial = port->serial;
+
+       dev_dbg(tty->dev, "%s\n", __func__);
+
+       if (!port->serial->type->wait_until_sent)
+               return;
+
+       mutex_lock(&serial->disc_mutex);
+       if (!serial->disconnected)
+               port->serial->type->wait_until_sent(tty, timeout);
+       mutex_unlock(&serial->disc_mutex);
 }
 
 static void serial_throttle(struct tty_struct *tty)
@@ -399,7 +408,7 @@ static int serial_ioctl(struct tty_struct *tty,
                                        unsigned int cmd, unsigned long arg)
 {
        struct usb_serial_port *port = tty->driver_data;
-       int retval = -ENODEV;
+       int retval = -ENOIOCTLCMD;
 
        dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd);
 
@@ -411,8 +420,6 @@ static int serial_ioctl(struct tty_struct *tty,
        default:
                if (port->serial->type->ioctl)
                        retval = port->serial->type->ioctl(tty, cmd, arg);
-               else
-                       retval = -ENOIOCTLCMD;
        }
 
        return retval;
@@ -1191,6 +1198,7 @@ static const struct tty_operations serial_ops = {
        .unthrottle =           serial_unthrottle,
        .break_ctl =            serial_break,
        .chars_in_buffer =      serial_chars_in_buffer,
+       .wait_until_sent =      serial_wait_until_sent,
        .tiocmget =             serial_tiocmget,
        .tiocmset =             serial_tiocmset,
        .get_icount =           serial_get_icount,
@@ -1316,6 +1324,8 @@ static void usb_serial_operations_init(struct usb_serial_driver *device)
        set_to_generic_if_null(device, close);
        set_to_generic_if_null(device, write_room);
        set_to_generic_if_null(device, chars_in_buffer);
+       if (device->tx_empty)
+               set_to_generic_if_null(device, wait_until_sent);
        set_to_generic_if_null(device, read_bulk_callback);
        set_to_generic_if_null(device, write_bulk_callback);
        set_to_generic_if_null(device, process_read_urb);
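
The core hunks above add a wait_until_sent tty operation backed by usb_serial_generic_wait_until_sent(), which polls a new per-driver tx_empty() callback; ftdi_sio, io_ti and ti_usb_3410_5052 now report only their software FIFO from chars_in_buffer() and expose the hardware state through tx_empty(). A minimal sketch of how a hypothetical sub-driver plugs in (demo_read_lsr() and DEMO_LSR_TX_EMPTY are placeholders, not part of this patch):

    #include <linux/usb/serial.h>

    static bool demo_tx_empty(struct usb_serial_port *port)
    {
            u8 lsr;

            /* Ask the hardware whether its transmit shifter has drained;
             * assume drained on a read error so callers do not hang. */
            if (demo_read_lsr(port, &lsr))
                    return true;

            return lsr & DEMO_LSR_TX_EMPTY;
    }

    static struct usb_serial_driver demo_device = {
            .chars_in_buffer        = usb_serial_generic_chars_in_buffer,
            .tx_empty               = demo_tx_empty,
            /* the core substitutes usb_serial_generic_wait_until_sent()
             * for wait_until_sent whenever tx_empty is provided */
    };
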
index 7573ec8a084f3f55c1f2597c2f2b699e58790351..9910aa2edf4b1916c518e3f83d11e39ff94226a0 100644 (file)
@@ -560,10 +560,19 @@ static int treo_attach(struct usb_serial *serial)
        */
 #define COPY_PORT(dest, src)                                           \
        do { \
+               int i;                                                  \
+                                                                       \
+               for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) {      \
+                       dest->read_urbs[i] = src->read_urbs[i];         \
+                       dest->read_urbs[i]->context = dest;             \
+                       dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
+               }                                                       \
                dest->read_urb = src->read_urb;                         \
                dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
                dest->bulk_in_buffer = src->bulk_in_buffer;             \
+               dest->bulk_in_size = src->bulk_in_size;                 \
                dest->interrupt_in_urb = src->interrupt_in_urb;         \
+               dest->interrupt_in_urb->context = dest;                 \
                dest->interrupt_in_endpointAddress = \
                                        src->interrupt_in_endpointAddress;\
                dest->interrupt_in_buffer = src->interrupt_in_buffer;   \
index b9fca3586d741003a50ffb435be7de74020c48e8..347caad47a121d3f7a26ec7f3246a6960114ffc2 100644 (file)
@@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty)
        struct whiteheat_port_settings port_settings;
        unsigned int cflag = tty->termios.c_cflag;
 
-       port_settings.port = port->number + 1;
+       port_settings.port = port->number - port->serial->minor + 1;
 
        /* get the byte size */
        switch (cflag & CSIZE) {
index 39ee7373b4ee8f08cea1871867b498e1b72c8d2f..fca4c752a4ed233199d82a787c0ebfbfff32e71c 100644 (file)
@@ -41,9 +41,6 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        int len;
        unsigned char *buf;
 
-       if (port->number != 0)
-               return -ENODEV;
-
        buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
@@ -53,7 +50,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0001, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 2nd cmd and receive data */
@@ -65,7 +62,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 3 cmd */
@@ -84,7 +81,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 4 cmd */
@@ -95,7 +92,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 5 cmd */
@@ -107,7 +104,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 6 cmd */
@@ -126,7 +123,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
        kfree(buf);
 
@@ -166,9 +163,6 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        int len;
        unsigned char *buf;
 
-       if (port->number != 0)
-               return;
-
        buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
        if (!buf)
                return;
@@ -178,7 +172,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0002, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 2nd ctl cmd(CTL    21 22 03 00  00 00 00 00 ) */
@@ -186,7 +180,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 3rd cmd and receive data */
@@ -198,7 +192,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 4 cmd */
@@ -217,7 +211,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 5 cmd */
@@ -228,7 +222,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 6 cmd */
@@ -240,7 +234,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 7 cmd */
@@ -259,7 +253,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 8 cmd */
@@ -270,7 +264,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        kfree(buf);
@@ -279,11 +273,29 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
 }
 
 static const struct usb_device_id id_table[] = {
-       { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */
-       { USB_DEVICE(0x19d2, 0xfffe) },
-       { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */
+       /* AC8710, AC8710T */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
+        /* AC8700 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
+       /* MG880 */
+       { USB_DEVICE(0x19d2, 0xfffd) },
+       { USB_DEVICE(0x19d2, 0xfffc) },
+       { USB_DEVICE(0x19d2, 0xfffb) },
+       /* AC2726, AC8710_V3 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(0x19d2, 0xfff6) },
+       { USB_DEVICE(0x19d2, 0xfff7) },
+       { USB_DEVICE(0x19d2, 0xfff8) },
+       { USB_DEVICE(0x19d2, 0xfff9) },
+       { USB_DEVICE(0x19d2, 0xffee) },
+       /* AC2716, MC2716 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
+       /* AD3812 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(0x19d2, 0xffec) },
        { USB_DEVICE(0x05C6, 0x3197) },
        { USB_DEVICE(0x05C6, 0x6000) },
+       { USB_DEVICE(0x05C6, 0x9008) },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
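
The HZ factor dropped throughout the hunks above was a unit bug: usb_control_msg() takes its timeout in milliseconds, and USB_CTRL_GET_TIMEOUT is already a millisecond value (5000), so multiplying by HZ stretched every control transfer timeout by the tick rate. A small stand-alone illustration of the magnitude, assuming HZ = 250 purely for the sake of example:

#include <stdio.h>

int main(void)
{
	const long usb_ctrl_get_timeout = 5000;	/* ms, per include/linux/usb.h */
	const long hz = 250;			/* illustrative CONFIG_HZ value */

	printf("intended timeout : %ld ms\n", usb_ctrl_get_timeout);
	printf("with the HZ bug  : %ld ms (about %ld minutes)\n",
	       hz * usb_ctrl_get_timeout,
	       hz * usb_ctrl_get_timeout / 60000);
	return 0;
}
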
index 8623577bbbe70d608eb0866f18be56c20513a5bf..281be56d56485535eaef90e890bb2bfebb30ad00 100644 (file)
@@ -105,8 +105,9 @@ struct rts51x_chip {
        int status_len;
 
        u32 flag;
-#ifdef CONFIG_REALTEK_AUTOPM
        struct us_data *us;
+
+#ifdef CONFIG_REALTEK_AUTOPM
        struct timer_list rts51x_suspend_timer;
        unsigned long timer_expires;
        int pwr_state;
@@ -988,6 +989,7 @@ static int init_realtek_cr(struct us_data *us)
        us->extra = chip;
        us->extra_destructor = realtek_cr_destructor;
        us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
+       chip->us = us;
 
        usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun);
 
@@ -1010,10 +1012,8 @@ static int init_realtek_cr(struct us_data *us)
                        SET_AUTO_DELINK(chip);
        }
 #ifdef CONFIG_REALTEK_AUTOPM
-       if (ss_en) {
-               chip->us = us;
+       if (ss_en)
                realtek_cr_autosuspend_setup(us);
-       }
 #endif
 
        usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag);
index acb7121a9316dd146a7cec60af798ecbe42b7681..6d78736563de748c319ade5c0ff5cf210028fc23 100644 (file)
@@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = {
  */
 static char *vfio_devnode(struct device *dev, umode_t *mode)
 {
-       if (MINOR(dev->devt) == 0)
+       if (mode && (MINOR(dev->devt) == 0))
                *mode = S_IRUGO | S_IWUGO;
 
        return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
index 2b51e2336aa20006e47cb0f1ac7ea146120413a3..f80d3dd41d8c6d420c8fb2d640a81739f2049674 100644 (file)
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 
 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
 {
-
-       bool zcopy;
        int i;
 
-       for (i = 0; i < n->dev.nvqs; ++i) {
-               zcopy = vhost_net_zcopy_mask & (0x1 << i);
-               if (zcopy)
-                       kfree(n->vqs[i].ubuf_info);
+       for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+               kfree(n->vqs[i].ubuf_info);
+               n->vqs[i].ubuf_info = NULL;
        }
 }
 
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
        bool zcopy;
        int i;
 
-       for (i = 0; i < n->dev.nvqs; ++i) {
+       for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                if (!zcopy)
                        continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
        return 0;
 
 err:
-       while (i--) {
-               zcopy = vhost_net_zcopy_mask & (0x1 << i);
-               if (!zcopy)
-                       continue;
-               kfree(n->vqs[i].ubuf_info);
-       }
+       vhost_net_clear_ubuf_info(n);
        return -ENOMEM;
 }
 
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
 {
        int i;
 
+       vhost_net_clear_ubuf_info(n);
+
        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                n->vqs[i].done_idx = 0;
                n->vqs[i].upend_idx = 0;
                n->vqs[i].ubufs = NULL;
-               kfree(n->vqs[i].ubuf_info);
-               n->vqs[i].ubuf_info = NULL;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
                                kref_get(&ubufs->kref);
                        }
                        nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
-               }
+               } else
+                       msg.msg_control = NULL;
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
        int r;
 
        mutex_lock(&n->dev.mutex);
+       if (vhost_dev_has_owner(&n->dev)) {
+               r = -EBUSY;
+               goto out;
+       }
        r = vhost_net_set_ubuf_info(n);
        if (r)
                goto out;
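
The vhost_net_clear_ubuf_info() rework above frees every ubuf_info slot unconditionally and then resets the pointer, which is what lets one helper serve both the allocation error path and vhost_net_vq_reset(): kfree(NULL) is a no-op, and a NULLed pointer makes a second pass harmless. A minimal user-space sketch of that idempotent-cleanup idiom (not the vhost code itself):

#include <stdlib.h>

struct slot {
	void *buf;
};

/* Free whatever is allocated and leave the array safe to clear again. */
static void clear_slots(struct slot *s, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		free(s[i].buf);		/* free(NULL), like kfree(NULL), is a no-op */
		s[i].buf = NULL;
	}
}

int main(void)
{
	struct slot s[2] = { { malloc(16) }, { NULL } };

	clear_slots(s, 2);
	clear_slots(s, 2);	/* calling cleanup twice is harmless */
	return 0;
}
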
index beee7f5787e6c814b2420f745be2555ca6a71af5..60aa5ad09a2fdb74a457ecadf0130f056f114c8e 100644 (file)
@@ -343,6 +343,12 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
        return attach.ret;
 }
 
+/* Caller should have device mutex */
+bool vhost_dev_has_owner(struct vhost_dev *dev)
+{
+       return dev->mm;
+}
+
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -350,7 +356,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
        int err;
 
        /* Is there an owner already? */
-       if (dev->mm) {
+       if (vhost_dev_has_owner(dev)) {
                err = -EBUSY;
                goto err_mm;
        }
index a7ad63592987f33ed4fd773691b7f5f604220d51..64adcf99ff33893f1eb71b4c181dba4df739a8f3 100644 (file)
@@ -133,6 +133,7 @@ struct vhost_dev {
 
 long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
 long vhost_dev_set_owner(struct vhost_dev *dev);
+bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
 struct vhost_memory *vhost_dev_reset_owner_prepare(void);
 void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
index bff0775e258c9ce30b1615e38492fa123eb49f8a..5174ebac288d65e70f31c13867e44aa98cc18ff4 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Since these may be in userspace, we use (inline) accessors.
  */
+#include <linux/module.h>
 #include <linux/vringh.h>
 #include <linux/virtio_ring.h>
 #include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
        return __vringh_need_notify(vrh, getu16_kern);
 }
 EXPORT_SYMBOL(vringh_need_notify_kern);
+
+MODULE_LICENSE("GPL");
index d71d60f94fc19a7fcc34ace0940dfb841e9904fc..2e937bdace6f123a1b6f1c0c055b4422dde78875 100644 (file)
@@ -2199,7 +2199,7 @@ config FB_XILINX
 
 config FB_GOLDFISH
        tristate "Goldfish Framebuffer"
-       depends on FB
+       depends on FB && HAS_DMA
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -2453,6 +2453,23 @@ config FB_HYPERV
        help
          This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
 
+config FB_SIMPLE
+       bool "Simple framebuffer support"
+       depends on (FB = y) && OF
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       help
+         Say Y if you want support for a simple frame-buffer.
+
+         This driver assumes that the display hardware has been initialized
+         before the kernel boots, and the kernel will simply render to the
+         pre-allocated frame buffer surface.
+
+         Configuration re: surface address, size, and format must be provided
+         through device tree, or potentially plain old platform data in the
+         future.
+
 source "drivers/video/omap/Kconfig"
 source "drivers/video/omap2/Kconfig"
 source "drivers/video/exynos/Kconfig"
index 7234e4a959e8c5b61e9d49b32d1057f83a7f290c..e8bae8dd4804d4bf797f444f239d5005a689390e 100644 (file)
@@ -166,6 +166,7 @@ obj-$(CONFIG_FB_MX3)                  += mx3fb.o
 obj-$(CONFIG_FB_DA8XX)           += da8xx-fb.o
 obj-$(CONFIG_FB_MXS)             += mxsfb.o
 obj-$(CONFIG_FB_SSD1307)         += ssd1307fb.o
+obj-$(CONFIG_FB_SIMPLE)           += simplefb.o
 
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
index 540909de6247b19daabb6fc6affa650fe6a70f9a..effdb373b8db0ffd55d4f56523c8c379f50e4cbc 100644 (file)
@@ -223,8 +223,14 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
 
 static void exit_backlight(struct atmel_lcdfb_info *sinfo)
 {
-       if (sinfo->backlight)
-               backlight_device_unregister(sinfo->backlight);
+       if (!sinfo->backlight)
+               return;
+
+       if (sinfo->backlight->ops) {
+               sinfo->backlight->props.power = FB_BLANK_POWERDOWN;
+               sinfo->backlight->ops->update_status(sinfo->backlight);
+       }
+       backlight_device_unregister(sinfo->backlight);
 }
 
 #else
@@ -461,8 +467,11 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
        if (info->fix.smem_len) {
                unsigned int smem_len = (var->xres_virtual * var->yres_virtual
                                         * ((var->bits_per_pixel + 7) / 8));
-               if (smem_len > info->fix.smem_len)
+               if (smem_len > info->fix.smem_len) {
+                       dev_err(dev, "Frame buffer is too small (%u) for screen size (need at least %u)\n",
+                               info->fix.smem_len, smem_len);
                        return -EINVAL;
+               }
        }
 
        /* Saturate vertical and horizontal timings at maximum values */
index a862e9173ebed67d67cac954a98eb70a9f94c346..48da25c96cd3c2a160077e880f58fed3a52da0f8 100644 (file)
@@ -18,6 +18,8 @@ font-objs-$(CONFIG_FONT_MINI_4x6)  += font_mini_4x6.o
 
 font-objs += $(font-objs-y)
 
+obj-$(CONFIG_FONTS) += font.o
+
 # Each configuration option enables a list of files.
 
 obj-$(CONFIG_DUMMY_CONSOLE)       += dummycon.o
index 60cc6fee654815444c21d46909351f0e15af6b10..c9c2252e371945939dcf0b5e1f667359cd7ba42d 100644 (file)
@@ -53,6 +53,8 @@ static char *def_disp_name;
 module_param_named(def_disp, def_disp_name, charp, 0);
 MODULE_PARM_DESC(def_disp, "default display name");
 
+static bool dss_initialized;
+
 const char *omapdss_get_default_display_name(void)
 {
        return core.default_display_name;
@@ -66,6 +68,12 @@ enum omapdss_version omapdss_get_version(void)
 }
 EXPORT_SYMBOL(omapdss_get_version);
 
+bool omapdss_is_initialized(void)
+{
+       return dss_initialized;
+}
+EXPORT_SYMBOL(omapdss_is_initialized);
+
 struct platform_device *dss_get_core_pdev(void)
 {
        return core.pdev;
@@ -603,6 +611,8 @@ static int __init omap_dss_init(void)
                return r;
        }
 
+       dss_initialized = true;
+
        return 0;
 }
 
@@ -633,7 +643,15 @@ static int __init omap_dss_init(void)
 
 static int __init omap_dss_init2(void)
 {
-       return omap_dss_register_drivers();
+       int r;
+
+       r = omap_dss_register_drivers();
+       if (r)
+               return r;
+
+       dss_initialized = true;
+
+       return 0;
 }
 
 core_initcall(omap_dss_init);
index 17f4d55c621ca446afb7daed1cc08b6b1e6e2035..a109934c0478ea72808eb0205018d0a1253341f0 100644 (file)
@@ -1065,10 +1065,6 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
        mutex_init(&hdmi.ip_data.lock);
 
        res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               DSSERR("can't get IORESOURCE_MEM HDMI\n");
-               return -EINVAL;
-       }
 
        /* Base address taken from platform */
        hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
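
This hunk, and several similar ones below (vrfb, omap_hdq, and the watchdog drivers), drop the explicit NULL check on the resource because devm_ioremap_resource() already validates its resource argument, logs an error, and returns an ERR_PTR on failure. A minimal sketch of the resulting probe pattern, with hypothetical names, just to show the shape of the idiom:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() copes with res == NULL and reports it */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
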
index c84bb8a4d0c4c1b7b1433fb97f6e55796d067434..856917b3361665137d676fda89f1c0f426ae41c2 100644 (file)
@@ -2416,6 +2416,9 @@ static int omapfb_probe(struct platform_device *pdev)
 
        DBG("omapfb_probe\n");
 
+       if (omapdss_is_initialized() == false)
+               return -EPROBE_DEFER;
+
        if (pdev->num_resources != 0) {
                dev_err(&pdev->dev, "probed for an unknown device\n");
                r = -ENODEV;
index 5261229c79afbef8afa4e80f24936f6dbb6e6f4e..f346b02eee1d84c2d8da77b15e3d64c72aa4fa17 100644 (file)
@@ -353,11 +353,6 @@ static int __init vrfb_probe(struct platform_device *pdev)
        /* first resource is the register res, the rest are vrfb contexts */
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "can't get vrfb base address\n");
-               return -EINVAL;
-       }
-
        vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(vrfb_base))
                return PTR_ERR(vrfb_base);
index d9f08c653d6275ebce317cd8679d92cafdbac5bc..dbfe2c18a4342dbcc37ffb9806a2e000ddfc379c 100644 (file)
@@ -710,7 +710,7 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
 
        dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
-               info->fix.smem_start + vma->vm_pgoff << PAGE_SHIFT,
+               info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT),
                vma->vm_start);
 
        return r;
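
The one-character ps3fb change above fixes an operator-precedence slip in the dev_dbg() argument: '+' binds tighter than '<<', so the unparenthesized expression shifted the already-summed base address instead of adding a shifted offset. A stand-alone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long smem_start = 0x10000000;
	unsigned long vm_pgoff = 2;

	/* what the old code printed: (smem_start + vm_pgoff) << PAGE_SHIFT */
	printf("%#lx\n", (smem_start + vm_pgoff) << 12);
	/* what was intended: smem_start + (vm_pgoff << PAGE_SHIFT) */
	printf("%#lx\n", smem_start + (vm_pgoff << 12));
	return 0;
}
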
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
new file mode 100644 (file)
index 0000000..e2e9e3e
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Simplest possible simple frame-buffer driver, as a platform device
+ *
+ * Copyright (c) 2013, Stephen Warren
+ *
+ * Based on q40fb.c, which was:
+ * Copyright (C) 2001 Richard Zidlicky <rz@linux-m68k.org>
+ *
+ * Also based on offb.c, which was:
+ * Copyright (C) 1997 Geert Uytterhoeven
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct fb_fix_screeninfo simplefb_fix = {
+       .id             = "simple",
+       .type           = FB_TYPE_PACKED_PIXELS,
+       .visual         = FB_VISUAL_TRUECOLOR,
+       .accel          = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo simplefb_var = {
+       .height         = -1,
+       .width          = -1,
+       .activate       = FB_ACTIVATE_NOW,
+       .vmode          = FB_VMODE_NONINTERLACED,
+};
+
+static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+                             u_int transp, struct fb_info *info)
+{
+       u32 *pal = info->pseudo_palette;
+       u32 cr = red >> (16 - info->var.red.length);
+       u32 cg = green >> (16 - info->var.green.length);
+       u32 cb = blue >> (16 - info->var.blue.length);
+       u32 value;
+
+       if (regno >= 16)
+               return -EINVAL;
+
+       value = (cr << info->var.red.offset) |
+               (cg << info->var.green.offset) |
+               (cb << info->var.blue.offset);
+       if (info->var.transp.length > 0) {
+               u32 mask = (1 << info->var.transp.length) - 1;
+               mask <<= info->var.transp.offset;
+               value |= mask;
+       }
+       pal[regno] = value;
+
+       return 0;
+}
+
+static struct fb_ops simplefb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_setcolreg   = simplefb_setcolreg,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+};
+
+struct simplefb_format {
+       const char *name;
+       u32 bits_per_pixel;
+       struct fb_bitfield red;
+       struct fb_bitfield green;
+       struct fb_bitfield blue;
+       struct fb_bitfield transp;
+};
+
+static struct simplefb_format simplefb_formats[] = {
+       { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} },
+};
+
+struct simplefb_params {
+       u32 width;
+       u32 height;
+       u32 stride;
+       struct simplefb_format *format;
+};
+
+static int simplefb_parse_dt(struct platform_device *pdev,
+                          struct simplefb_params *params)
+{
+       struct device_node *np = pdev->dev.of_node;
+       int ret;
+       const char *format;
+       int i;
+
+       ret = of_property_read_u32(np, "width", &params->width);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse width property\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(np, "height", &params->height);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse height property\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(np, "stride", &params->stride);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse stride property\n");
+               return ret;
+       }
+
+       ret = of_property_read_string(np, "format", &format);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse format property\n");
+               return ret;
+       }
+       params->format = NULL;
+       for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) {
+               if (strcmp(format, simplefb_formats[i].name))
+                       continue;
+               params->format = &simplefb_formats[i];
+               break;
+       }
+       if (!params->format) {
+               dev_err(&pdev->dev, "Invalid format value\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int simplefb_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct simplefb_params params;
+       struct fb_info *info;
+       struct resource *mem;
+
+       if (fb_get_options("simplefb", NULL))
+               return -ENODEV;
+
+       ret = simplefb_parse_dt(pdev, &params);
+       if (ret)
+               return ret;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               dev_err(&pdev->dev, "No memory resource\n");
+               return -EINVAL;
+       }
+
+       info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
+       if (!info)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, info);
+
+       info->fix = simplefb_fix;
+       info->fix.smem_start = mem->start;
+       info->fix.smem_len = resource_size(mem);
+       info->fix.line_length = params.stride;
+
+       info->var = simplefb_var;
+       info->var.xres = params.width;
+       info->var.yres = params.height;
+       info->var.xres_virtual = params.width;
+       info->var.yres_virtual = params.height;
+       info->var.bits_per_pixel = params.format->bits_per_pixel;
+       info->var.red = params.format->red;
+       info->var.green = params.format->green;
+       info->var.blue = params.format->blue;
+       info->var.transp = params.format->transp;
+
+       info->fbops = &simplefb_ops;
+       info->flags = FBINFO_DEFAULT;
+       info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
+                                        info->fix.smem_len);
+       if (!info->screen_base) {
+               framebuffer_release(info);
+               return -ENODEV;
+       }
+       info->pseudo_palette = (void *)(info + 1);
+
+       ret = register_framebuffer(info);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+               framebuffer_release(info);
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
+
+       return 0;
+}
+
+static int simplefb_remove(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+
+       unregister_framebuffer(info);
+       framebuffer_release(info);
+
+       return 0;
+}
+
+static const struct of_device_id simplefb_of_match[] = {
+       { .compatible = "simple-framebuffer", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, simplefb_of_match);
+
+static struct platform_driver simplefb_driver = {
+       .driver = {
+               .name = "simple-framebuffer",
+               .owner = THIS_MODULE,
+               .of_match_table = simplefb_of_match,
+       },
+       .probe = simplefb_probe,
+       .remove = simplefb_remove,
+};
+module_platform_driver(simplefb_driver);
+
+MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
+MODULE_DESCRIPTION("Simple framebuffer driver");
+MODULE_LICENSE("GPL v2");
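
simplefb_setcolreg() above pre-packs each of the 16 palette entries into a ready-to-write pixel value, so the generic cfb_fillrect()/cfb_imageblit() helpers can be handed ready-made console colors. A stand-alone illustration of the packing arithmetic for the one format the driver currently knows, r5g6b5, with offsets and lengths taken from simplefb_formats[] above:

#include <stdint.h>
#include <stdio.h>

/* Same math as simplefb_setcolreg() for red {11,5}, green {5,6}, blue {0,5}. */
static uint32_t pack_r5g6b5(uint16_t red, uint16_t green, uint16_t blue)
{
	uint32_t cr = red   >> (16 - 5);
	uint32_t cg = green >> (16 - 6);
	uint32_t cb = blue  >> (16 - 5);

	return (cr << 11) | (cg << 5) | (cb << 0);
}

int main(void)
{
	printf("white = %#06x\n", pack_r5g6b5(0xffff, 0xffff, 0xffff));	/* 0xffff */
	printf("red   = %#06x\n", pack_r5g6b5(0xffff, 0x0000, 0x0000));	/* 0xf800 */
	return 0;
}
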
index db2390aed38781705deeb179790d8a540d9660eb..6e94d8dd3d00a31d696fc7e140329d9b4dc72c6b 100644 (file)
@@ -555,11 +555,6 @@ static int omap_hdq_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, hdq_data);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_dbg(&pdev->dev, "unable to get resource\n");
-               return -ENXIO;
-       }
-
        hdq_data->hdq_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(hdq_data->hdq_base))
                return PTR_ERR(hdq_data->hdq_base);
index d184c48a0482b6a72fab3db591b5a8c425b531ab..37cb09b27b6328e85581955400cb94bf18b66f0e 100644 (file)
@@ -248,11 +248,6 @@ static int ath79_wdt_probe(struct platform_device *pdev)
                return -EBUSY;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no memory resource found\n");
-               return -EINVAL;
-       }
-
        wdt_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(wdt_base))
                return PTR_ERR(wdt_base);
index 100d4fbfde2adf594203cd5a874dd72f20e06b03..bead7740c86a44da31d3b8dc876b20b51a62cd78 100644 (file)
@@ -217,11 +217,6 @@ static int davinci_wdt_probe(struct platform_device *pdev)
        dev_info(dev, "heartbeat %d sec\n", heartbeat);
 
        wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (wdt_mem == NULL) {
-               dev_err(dev, "failed to get memory region resource\n");
-               return -ENOENT;
-       }
-
        wdt_base = devm_ioremap_resource(dev, wdt_mem);
        if (IS_ERR(wdt_base))
                return PTR_ERR(wdt_base);
index ff908823688cf969e5fd420141827f0cc9a91a39..62946c2cb4f8b1d579f42cd18c5e72a6e4531434 100644 (file)
@@ -257,11 +257,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
        struct resource *res;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get device resources\n");
-               return -ENODEV;
-       }
-
        imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(imx2_wdt.base))
                return PTR_ERR(imx2_wdt.base);
index f03bf501527f64785a3fbbec39963f979d70ad8a..9e02d60a364b00d271efb0b4d4eda046a25fbf08 100644 (file)
@@ -19,11 +19,10 @@ config XEN_SELFBALLOONING
          by the current usage of anonymous memory ("committed AS") and
          controlled by various sysfs-settable parameters.  Configuring
          FRONTSWAP is highly recommended; if it is not configured, self-
-         ballooning is disabled by default but can be enabled with the
-         'selfballooning' kernel boot parameter.  If FRONTSWAP is configured,
+         ballooning is disabled by default. If FRONTSWAP is configured,
          frontswap-selfshrinking is enabled by default but can be disabled
-         with the 'noselfshrink' kernel boot parameter; and self-ballooning
-         is enabled by default but can be disabled with the 'noselfballooning'
+         with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
+         is enabled by default but can be disabled with the 'tmem.selfballooning=0'
          kernel boot parameter.  Note that systems without a sufficiently
          large swap device should not enable self-ballooning.
 
index a56776dbe0958ece67b1c12a4a13b8be5bdb023c..930fb68179012355952c20d759111a6755b3acbe 100644 (file)
@@ -407,7 +407,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                nr_pages = ARRAY_SIZE(frame_list);
 
        for (i = 0; i < nr_pages; i++) {
-               if ((page = alloc_page(gfp)) == NULL) {
+               page = alloc_page(gfp);
+               if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
index ca2b00e9d558c8fea579f4667106eaccee0adb84..2cfc24d76fc54d97d9e8106c5fdf108379b7ba9f 100644 (file)
@@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma)
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-       if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
+       if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;
 
        xen_unmap_domain_mfn_range(vma, numpgs, pages);
index e3600be4e7fabe54660d5adb1d2f7446b482ff00..0f0493c633717ae4814162f645406db15e16d54e 100644 (file)
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
 #include <linux/frontswap.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+       tmem_enabled = true;
+       return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#else /* CONFIG_FRONTSWAP */
+#define frontswap (0)
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
 #define TMEM_CONTROL               0
 #define TMEM_NEW_POOL              1
 #define TMEM_DESTROY_POOL          2
@@ -129,16 +155,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
-       tmem_enabled = true;
-       return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +246,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
-       disable_cleancache = true;
-       return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
 static struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
@@ -361,20 +363,6 @@ static void tmem_frontswap_init(unsigned ignored)
                    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
-       disable_frontswap = true;
-       return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
 static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
@@ -382,8 +370,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
 };
-#else  /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
 #endif
 
 static int xen_tmem_init(void)
@@ -391,12 +377,12 @@ static int xen_tmem_init(void)
        if (!xen_domain())
                return 0;
 #ifdef CONFIG_FRONTSWAP
-       if (tmem_enabled && !disable_frontswap) {
+       if (tmem_enabled && frontswap) {
                char *s = "";
-               struct frontswap_ops *old_ops =
-                       frontswap_register_ops(&tmem_frontswap_ops);
+               struct frontswap_ops *old_ops;
 
                tmem_frontswap_poolid = -1;
+               old_ops = frontswap_register_ops(&tmem_frontswap_ops);
                if (IS_ERR(old_ops) || old_ops) {
                        if (IS_ERR(old_ops))
                                return PTR_ERR(old_ops);
@@ -408,7 +394,7 @@ static int xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
        BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-       if (tmem_enabled && !disable_cleancache) {
+       if (tmem_enabled && cleancache) {
                char *s = "";
                struct cleancache_ops *old_ops =
                        cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +405,15 @@ static int xen_tmem_init(void)
        }
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
-       xen_selfballoon_init(!disable_selfballooning,
-                               !disable_frontswap_selfshrinking);
+       /*
+        * There is no point of driving pages to the swap system if they
+        * aren't going anywhere in tmem universe.
+        */
+       if (!frontswap) {
+               selfshrinking = false;
+               selfballooning = false;
+       }
+       xen_selfballoon_init(selfballooning, selfshrinking);
 #endif
        return 0;
 }
index a2278ba7fb273a523476e0504388e49e48511093..4e8ba38aa0c9cb0499ecdf27172baf44d55a65b5 100644 (file)
@@ -106,7 +106,7 @@ static void pcistub_device_release(struct kref *kref)
        else
                pci_restore_state(dev);
 
-       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+       if (dev->msix_cap) {
                struct physdev_pci_device ppdev = {
                        .seg = pci_domain_nr(dev->bus),
                        .bus = dev->bus->number,
@@ -371,7 +371,7 @@ static int pcistub_init_device(struct pci_dev *dev)
        if (err)
                goto config_release;
 
-       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+       if (dev->msix_cap) {
                struct physdev_pci_device ppdev = {
                        .seg = pci_domain_nr(dev->bus),
                        .bus = dev->bus->number,
index f2ef569c7cc147ad5865a3630eb4913a7164d262..f70984a892aa5c16a0e24deaee28acb2497dc9f6 100644 (file)
  * System configuration note: Selfballooning should not be enabled on
  * systems without a sufficiently large swap device configured; for best
  * results, it is recommended that total swap be increased by the size
- * of the guest memory.  Also, while technically not required to be
- * configured, it is highly recommended that frontswap also be configured
- * and enabled when selfballooning is running.  So, selfballooning
- * is disabled by default if frontswap is not configured and can only
- * be enabled with the "selfballooning" kernel boot option; similarly
- * selfballooning is enabled by default if frontswap is configured and
- * can be disabled with the "noselfballooning" kernel boot option.  Finally,
- * when frontswap is configured, frontswap-selfshrinking can be disabled
- * with the "noselfshrink" kernel boot option.
+ * of the guest memory.  Note that selfballooning should be disabled by default
+ * if frontswap is not configured.  Similarly selfballooning should be enabled
+ * by default if frontswap is configured and can be disabled with the
+ * "tmem.selfballooning=0" kernel boot option.  Finally, when frontswap is
+ * configured, frontswap-selfshrinking can be disabled with the
+ * "tmem.selfshrink=0" kernel boot option.
  *
  * Selfballooning is disallowed in domain0 and force-disabled.
  *
@@ -120,9 +117,6 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
 /* Enable/disable with sysfs. */
 static bool frontswap_selfshrinking __read_mostly;
 
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink = true;
-
 /*
  * The default values for the following parameters were deemed reasonable
  * by experimentation, may be workload-dependent, and can all be
@@ -176,35 +170,6 @@ static void frontswap_selfshrink(void)
        frontswap_shrink(tgt_frontswap_pages);
 }
 
-static int __init xen_nofrontswap_selfshrink_setup(char *s)
-{
-       use_frontswap_selfshrink = false;
-       return 1;
-}
-
-__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
-
-/* Disable with kernel boot option. */
-static bool use_selfballooning = true;
-
-static int __init xen_noselfballooning_setup(char *s)
-{
-       use_selfballooning = false;
-       return 1;
-}
-
-__setup("noselfballooning", xen_noselfballooning_setup);
-#else /* !CONFIG_FRONTSWAP */
-/* Enable with kernel boot option. */
-static bool use_selfballooning;
-
-static int __init xen_selfballooning_setup(char *s)
-{
-       use_selfballooning = true;
-       return 1;
-}
-
-__setup("selfballooning", xen_selfballooning_setup);
 #endif /* CONFIG_FRONTSWAP */
 
 #define MB2PAGES(mb)   ((mb) << (20 - PAGE_SHIFT))
index 61786be9138b96877ccadce65642a4590eddead1..ec097d6f964dab5e695ff9abdf818581b3473e74 100644 (file)
@@ -534,7 +534,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 
        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
        if (err)
-               goto out_err;
+               goto out_err_free_ballooned_pages;
 
        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
@@ -543,8 +543,9 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        *vaddr = addr;
        return 0;
 
- out_err:
+ out_err_free_ballooned_pages:
        free_xenballooned_pages(1, &node->page);
+ out_err:
        kfree(node);
        return err;
 }
index c8abd3b8a6c48087967ddcfb2ac012bf642a0e10..e74f9c1fbd80a9fe4c6cfbba0176785b005c06fd 100644 (file)
@@ -45,6 +45,7 @@ int xb_wait_for_data_to_read(void);
 int xs_input_avail(void);
 extern struct xenstore_domain_interface *xen_store_interface;
 extern int xen_store_evtchn;
+extern enum xenstore_init xen_store_domain_type;
 
 extern const struct file_operations xen_xenbus_fops;
 
index d730008007624b4227780457a96c77f8662c248e..a6f42fc01407e435c477862d380e8a1da55d6656 100644 (file)
@@ -70,22 +70,21 @@ static long xenbus_alloc(domid_t domid)
        return err;
 }
 
-static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
+                                unsigned long data)
 {
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
        switch (cmd) {
-               case IOCTL_XENBUS_BACKEND_EVTCHN:
-                       if (xen_store_evtchn > 0)
-                               return xen_store_evtchn;
-                       return -ENODEV;
-
-               case IOCTL_XENBUS_BACKEND_SETUP:
-                       return xenbus_alloc(data);
-
-               default:
-                       return -ENOTTY;
+       case IOCTL_XENBUS_BACKEND_EVTCHN:
+               if (xen_store_evtchn > 0)
+                       return xen_store_evtchn;
+               return -ENODEV;
+       case IOCTL_XENBUS_BACKEND_SETUP:
+               return xenbus_alloc(data);
+       default:
+               return -ENOTTY;
        }
 }
 
index 3325884c693f8f1db3d94e43be43a622b857af2d..56cfaaa9d006ab1b86c47d6c1a9f1ab90268f399 100644 (file)
@@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(xen_store_evtchn);
 struct xenstore_domain_interface *xen_store_interface;
 EXPORT_SYMBOL_GPL(xen_store_interface);
 
+enum xenstore_init xen_store_domain_type;
+EXPORT_SYMBOL_GPL(xen_store_domain_type);
+
 static unsigned long xen_store_mfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
@@ -719,17 +722,11 @@ static int __init xenstored_local_init(void)
        return err;
 }
 
-enum xenstore_init {
-       UNKNOWN,
-       PV,
-       HVM,
-       LOCAL,
-};
 static int __init xenbus_init(void)
 {
        int err = 0;
-       enum xenstore_init usage = UNKNOWN;
        uint64_t v = 0;
+       xen_store_domain_type = XS_UNKNOWN;
 
        if (!xen_domain())
                return -ENODEV;
@@ -737,29 +734,29 @@ static int __init xenbus_init(void)
        xenbus_ring_ops_init();
 
        if (xen_pv_domain())
-               usage = PV;
+               xen_store_domain_type = XS_PV;
        if (xen_hvm_domain())
-               usage = HVM;
+               xen_store_domain_type = XS_HVM;
        if (xen_hvm_domain() && xen_initial_domain())
-               usage = LOCAL;
+               xen_store_domain_type = XS_LOCAL;
        if (xen_pv_domain() && !xen_start_info->store_evtchn)
-               usage = LOCAL;
+               xen_store_domain_type = XS_LOCAL;
        if (xen_pv_domain() && xen_start_info->store_evtchn)
                xenstored_ready = 1;
 
-       switch (usage) {
-       case LOCAL:
+       switch (xen_store_domain_type) {
+       case XS_LOCAL:
                err = xenstored_local_init();
                if (err)
                        goto out_error;
                xen_store_interface = mfn_to_virt(xen_store_mfn);
                break;
-       case PV:
+       case XS_PV:
                xen_store_evtchn = xen_start_info->store_evtchn;
                xen_store_mfn = xen_start_info->store_mfn;
                xen_store_interface = mfn_to_virt(xen_store_mfn);
                break;
-       case HVM:
+       case XS_HVM:
                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
                if (err)
                        goto out_error;
index bb4f92ed87305452a3f2a5ad8a25293c75da352b..146f857a36f83b5a2f4646424c1251a23abf1b07 100644 (file)
@@ -47,6 +47,13 @@ struct xen_bus_type {
        struct bus_type bus;
 };
 
+enum xenstore_init {
+       XS_UNKNOWN,
+       XS_PV,
+       XS_HVM,
+       XS_LOCAL,
+};
+
 extern struct device_attribute xenbus_dev_attrs[];
 
 extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
index 3159a37d966d57755b6bd6d0d2a19379309c7e6e..a7e25073de19bc3b70e95b2a8697792f62312b63 100644 (file)
@@ -29,6 +29,8 @@
 #include "xenbus_probe.h"
 
 
+static struct workqueue_struct *xenbus_frontend_wq;
+
 /* device/<type>/<id> => <type>-<id> */
 static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
 {
@@ -89,9 +91,40 @@ static void backend_changed(struct xenbus_watch *watch,
        xenbus_otherend_changed(watch, vec, len, 1);
 }
 
+static void xenbus_frontend_delayed_resume(struct work_struct *w)
+{
+       struct xenbus_device *xdev = container_of(w, struct xenbus_device, work);
+
+       xenbus_dev_resume(&xdev->dev);
+}
+
+static int xenbus_frontend_dev_resume(struct device *dev)
+{
+       /*
+        * If xenstored is running in this domain, we cannot access the backend
+        * state at the moment, so we need to defer xenbus_dev_resume
+        */
+       if (xen_store_domain_type == XS_LOCAL) {
+               struct xenbus_device *xdev = to_xenbus_device(dev);
+
+               if (!xenbus_frontend_wq) {
+                       pr_err("%s: no workqueue to process delayed resume\n",
+                              xdev->nodename);
+                       return -EFAULT;
+               }
+
+               INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
+               queue_work(xenbus_frontend_wq, &xdev->work);
+
+               return 0;
+       }
+
+       return xenbus_dev_resume(dev);
+}
+
 static const struct dev_pm_ops xenbus_pm_ops = {
        .suspend        = xenbus_dev_suspend,
-       .resume         = xenbus_dev_resume,
+       .resume         = xenbus_frontend_dev_resume,
        .freeze         = xenbus_dev_suspend,
        .thaw           = xenbus_dev_cancel,
        .restore        = xenbus_dev_resume,
@@ -440,6 +473,8 @@ static int __init xenbus_probe_frontend_init(void)
 
        register_xenstore_notifier(&xenstore_notifier);
 
+       xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+
        return 0;
 }
 subsys_initcall(xenbus_probe_frontend_init);
index c5b1a8c10411ab108960eb74ed20f1b1a4bed601..2bbcacf74d0c64f8814cb68d8c4eff423dfe98d4 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
        for (i = 0; i < ctx->nr_pages; i++)
                put_page(ctx->ring_pages[i]);
 
-       if (ctx->mmap_size)
-               vm_munmap(ctx->mmap_base, ctx->mmap_size);
-
        if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
                kfree(ctx->ring_pages);
 }
@@ -307,7 +304,9 @@ static void free_ioctx(struct kioctx *ctx)
        kunmap_atomic(ring);
 
        while (atomic_read(&ctx->reqs_active) > 0) {
-               wait_event(ctx->wait, head != ctx->tail);
+               wait_event(ctx->wait,
+                               head != ctx->tail ||
+                               atomic_read(&ctx->reqs_active) <= 0);
 
                avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
 
@@ -320,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
 
        aio_free_ring(ctx);
 
-       spin_lock(&aio_nr_lock);
-       BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
-       aio_nr -= ctx->max_reqs;
-       spin_unlock(&aio_nr_lock);
-
        pr_debug("freeing %p\n", ctx);
 
        /*
@@ -433,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
 {
        if (!atomic_xchg(&ctx->dead, 1)) {
                hlist_del_rcu(&ctx->list);
-               /* Between hlist_del_rcu() and dropping the initial ref */
-               synchronize_rcu();
 
                /*
-                * We can't punt to workqueue here because put_ioctx() ->
-                * free_ioctx() will unmap the ringbuffer, and that has to be
-                * done in the original process's context. kill_ioctx_rcu/work()
-                * exist for exit_aio(), as in that path free_ioctx() won't do
-                * the unmap.
+                * It'd be more correct to do this in free_ioctx(), after all
+                * the outstanding kiocbs have finished - but by then io_destroy
+                * has already returned, so io_setup() could potentially return
+                * -EAGAIN with no ioctxs actually in use (as far as userspace
+                *  could tell).
                 */
-               kill_ioctx_work(&ctx->rcu_work);
+               spin_lock(&aio_nr_lock);
+               BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+               aio_nr -= ctx->max_reqs;
+               spin_unlock(&aio_nr_lock);
+
+               if (ctx->mmap_size)
+                       vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+               /* Between hlist_del_rcu() and dropping the initial ref */
+               call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
        }
 }
 
@@ -493,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               if (!atomic_xchg(&ctx->dead, 1)) {
-                       hlist_del_rcu(&ctx->list);
-                       call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
-               }
+               kill_ioctx(ctx);
        }
 }
 
@@ -1299,8 +1297,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
  *     < min_nr if the timeout specified by timeout has elapsed
  *     before sufficient events are available, where timeout == NULL
  *     specifies an infinite timeout. Note that the timeout pointed to by
- *     timeout is relative and will be updated if not NULL and the
- *     operation blocks. Will fail with -ENOSYS if not implemented.
+ *     timeout is relative.  Will fail with -ENOSYS if not implemented.
  */
 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
                long, min_nr,
index 8615ee89ab55d8c6d96a26556327b31c3967c0b6..f95dddced968f6f4509f1536b80462baa1c4393b 100644 (file)
@@ -265,8 +265,8 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                result = filldir(dirent, keybuf, keysize, filp->f_pos,
                                 (ino_t) value, d_type);
        }
-
-       filp->f_pos++;
+       if (!result)
+               filp->f_pos++;
 
        befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
 
index b4fb41558111b75d461096d8acdfca34690b8728..290e347b6db3f925f414fd9be4e6ea394da6f887 100644 (file)
@@ -918,7 +918,8 @@ again:
                                                           ref->parent, bsz, 0);
                                if (!eb || !extent_buffer_uptodate(eb)) {
                                        free_extent_buffer(eb);
-                                       return -EIO;
+                                       ret = -EIO;
+                                       goto out;
                                }
                                ret = find_extent_in_eb(eb, bytenr,
                                                        *extent_item_pos, &eie);
index 18af6f48781a1f31e1d41c23bb08a2e1b6ea12a7..1431a696501704d3f9e0901c64de537b2b20183a 100644 (file)
@@ -1700,7 +1700,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                unsigned int j;
                DECLARE_COMPLETION_ONSTACK(complete);
 
-               bio = bio_alloc(GFP_NOFS, num_pages - i);
+               bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
                if (!bio) {
                        printk(KERN_INFO
                               "btrfsic: bio_alloc() for %u pages failed!\n",
index de6de8e60b46019f528b93505d3848adb8699191..02fae7f7e42cb417a14fe0243805bcad4270b592 100644 (file)
@@ -951,10 +951,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        BUG_ON(ret); /* -ENOMEM */
                }
                if (new_flags != 0) {
+                       int level = btrfs_header_level(buf);
+
                        ret = btrfs_set_disk_extent_flags(trans, root,
                                                          buf->start,
                                                          buf->len,
-                                                         new_flags, 0);
+                                                         new_flags, level, 0);
                        if (ret)
                                return ret;
                }
index 63c328a9ce956c716c8ed6e1a0131b5260bb022b..d6dd49b51ba8dfe27069a139ab42cfd88e58c117 100644 (file)
@@ -88,12 +88,12 @@ struct btrfs_ordered_sum;
 /* holds checksums of all the data extents */
 #define BTRFS_CSUM_TREE_OBJECTID 7ULL
 
-/* for storing balance parameters in the root tree */
-#define BTRFS_BALANCE_OBJECTID -4ULL
-
 /* holds quota configuration and tracking */
 #define BTRFS_QUOTA_TREE_OBJECTID 8ULL
 
+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
 /* orphan objectid for tracking unlinked/truncated files */
 #define BTRFS_ORPHAN_OBJECTID -5ULL
 
@@ -3075,7 +3075,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
-                               int is_data);
+                               int level, int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
                      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
index f75fcaf79aebd11d54ebcc6df94d6f5b67c55dfe..70b962cc177d973688b0d2deb56965bffbc423ae 100644 (file)
@@ -60,6 +60,7 @@ struct btrfs_delayed_ref_node {
 struct btrfs_delayed_extent_op {
        struct btrfs_disk_key key;
        u64 flags_to_set;
+       int level;
        unsigned int update_key:1;
        unsigned int update_flags:1;
        unsigned int is_data:1;
index 7ba7b3900cb8eb749b82241cc4533b4fc978ef8a..65241f32d3f8aec282a80d443c7acc677f68726a 100644 (file)
@@ -313,6 +313,11 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
        struct btrfs_device *tgt_device = NULL;
        struct btrfs_device *src_device = NULL;
 
+       if (btrfs_fs_incompat(fs_info, RAID56)) {
+               pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n");
+               return -EINVAL;
+       }
+
        switch (args->start.cont_reading_from_srcdev_mode) {
        case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
        case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
index 4e9ebe1f1827895712afdea8e0e8016bd20088ca..b8b60b660c8f833cb38bf93823e1a3738e32669a 100644 (file)
@@ -152,7 +152,7 @@ static struct btrfs_lockdep_keyset {
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
-       { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
+       { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
@@ -1513,7 +1513,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
        }
 
        root->commit_root = btrfs_root_node(root);
-       BUG_ON(!root->node); /* -ENOMEM */
 out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
@@ -1988,30 +1987,33 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 {
        free_extent_buffer(info->tree_root->node);
        free_extent_buffer(info->tree_root->commit_root);
-       free_extent_buffer(info->dev_root->node);
-       free_extent_buffer(info->dev_root->commit_root);
-       free_extent_buffer(info->extent_root->node);
-       free_extent_buffer(info->extent_root->commit_root);
-       free_extent_buffer(info->csum_root->node);
-       free_extent_buffer(info->csum_root->commit_root);
-       if (info->quota_root) {
-               free_extent_buffer(info->quota_root->node);
-               free_extent_buffer(info->quota_root->commit_root);
-       }
-
        info->tree_root->node = NULL;
        info->tree_root->commit_root = NULL;
-       info->dev_root->node = NULL;
-       info->dev_root->commit_root = NULL;
-       info->extent_root->node = NULL;
-       info->extent_root->commit_root = NULL;
-       info->csum_root->node = NULL;
-       info->csum_root->commit_root = NULL;
+
+       if (info->dev_root) {
+               free_extent_buffer(info->dev_root->node);
+               free_extent_buffer(info->dev_root->commit_root);
+               info->dev_root->node = NULL;
+               info->dev_root->commit_root = NULL;
+       }
+       if (info->extent_root) {
+               free_extent_buffer(info->extent_root->node);
+               free_extent_buffer(info->extent_root->commit_root);
+               info->extent_root->node = NULL;
+               info->extent_root->commit_root = NULL;
+       }
+       if (info->csum_root) {
+               free_extent_buffer(info->csum_root->node);
+               free_extent_buffer(info->csum_root->commit_root);
+               info->csum_root->node = NULL;
+               info->csum_root->commit_root = NULL;
+       }
        if (info->quota_root) {
+               free_extent_buffer(info->quota_root->node);
+               free_extent_buffer(info->quota_root->commit_root);
                info->quota_root->node = NULL;
                info->quota_root->commit_root = NULL;
        }
-
        if (chunk_root) {
                free_extent_buffer(info->chunk_root->node);
                free_extent_buffer(info->chunk_root->commit_root);
@@ -2857,8 +2859,8 @@ fail_qgroup:
        btrfs_free_qgroup_config(fs_info);
 fail_trans_kthread:
        kthread_stop(fs_info->transaction_kthread);
-       del_fs_roots(fs_info);
        btrfs_cleanup_transaction(fs_info->tree_root);
+       del_fs_roots(fs_info);
 fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);
 
@@ -3128,7 +3130,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
         * caller
         */
        device->flush_bio = NULL;
-       bio = bio_alloc(GFP_NOFS, 0);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
        if (!bio)
                return -ENOMEM;
 
@@ -3510,15 +3512,15 @@ int close_ctree(struct btrfs_root *root)
                       percpu_counter_sum(&fs_info->delalloc_bytes));
        }
 
-       free_root_pointers(fs_info, 1);
-
        btrfs_free_block_groups(fs_info);
 
+       btrfs_stop_all_workers(fs_info);
+
        del_fs_roots(fs_info);
 
-       iput(fs_info->btree_inode);
+       free_root_pointers(fs_info, 1);
 
-       btrfs_stop_all_workers(fs_info);
+       iput(fs_info->btree_inode);
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        if (btrfs_test_opt(root, CHECK_INTEGRITY))
@@ -3659,8 +3661,11 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                                         ordered_operations);
 
                list_del_init(&btrfs_inode->ordered_operations);
+               spin_unlock(&root->fs_info->ordered_extent_lock);
 
                btrfs_invalidate_inodes(btrfs_inode->root);
+
+               spin_lock(&root->fs_info->ordered_extent_lock);
        }
 
        spin_unlock(&root->fs_info->ordered_extent_lock);
@@ -3782,8 +3787,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
                list_del_init(&btrfs_inode->delalloc_inodes);
                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
                          &btrfs_inode->runtime_flags);
+               spin_unlock(&root->fs_info->delalloc_lock);
 
                btrfs_invalidate_inodes(btrfs_inode->root);
+
+               spin_lock(&root->fs_info->delalloc_lock);
        }
 
        spin_unlock(&root->fs_info->delalloc_lock);
@@ -3808,7 +3816,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                while (start <= end) {
                        eb = btrfs_find_tree_block(root, start,
                                                   root->leafsize);
-                       start += eb->len;
+                       start += root->leafsize;
                        if (!eb)
                                continue;
                        wait_on_extent_buffer_writeback(eb);
index 2305b5c5cf0012d77de5dde14721ddc1cc0ccc5c..df472ab1b5acca7b411b05bcc414913bcebcc244 100644 (file)
@@ -2070,8 +2070,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
        u32 item_size;
        int ret;
        int err = 0;
-       int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
-                       node->type == BTRFS_SHARED_BLOCK_REF_KEY);
+       int metadata = !extent_op->is_data;
 
        if (trans->aborted)
                return 0;
@@ -2086,11 +2085,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
        key.objectid = node->bytenr;
 
        if (metadata) {
-               struct btrfs_delayed_tree_ref *tree_ref;
-
-               tree_ref = btrfs_delayed_node_to_tree_ref(node);
                key.type = BTRFS_METADATA_ITEM_KEY;
-               key.offset = tree_ref->level;
+               key.offset = extent_op->level;
        } else {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = node->num_bytes;
@@ -2719,7 +2715,7 @@ out:
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
-                               int is_data)
+                               int level, int is_data)
 {
        struct btrfs_delayed_extent_op *extent_op;
        int ret;
@@ -2732,6 +2728,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        extent_op->update_flags = 1;
        extent_op->update_key = 0;
        extent_op->is_data = is_data ? 1 : 0;
+       extent_op->level = level;
 
        ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
                                          num_bytes, extent_op);
@@ -3109,6 +3106,11 @@ again:
        WARN_ON(ret);
 
        if (i_size_read(inode) > 0) {
+               ret = btrfs_check_trunc_cache_free_space(root,
+                                       &root->fs_info->global_block_rsv);
+               if (ret)
+                       goto out_put;
+
                ret = btrfs_truncate_free_space_cache(root, trans, path,
                                                      inode);
                if (ret)
@@ -4562,6 +4564,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
        fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
+       if (fs_info->quota_root)
+               fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
 
        update_global_block_rsv(fs_info);
@@ -6651,51 +6655,51 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        int ret;
+       bool global_updated = false;
 
        block_rsv = get_block_rsv(trans, root);
 
-       if (block_rsv->size == 0) {
-               ret = reserve_metadata_bytes(root, block_rsv, blocksize,
-                                            BTRFS_RESERVE_NO_FLUSH);
-               /*
-                * If we couldn't reserve metadata bytes try and use some from
-                * the global reserve.
-                */
-               if (ret && block_rsv != global_rsv) {
-                       ret = block_rsv_use_bytes(global_rsv, blocksize);
-                       if (!ret)
-                               return global_rsv;
-                       return ERR_PTR(ret);
-               } else if (ret) {
-                       return ERR_PTR(ret);
-               }
+       if (unlikely(block_rsv->size == 0))
+               goto try_reserve;
+again:
+       ret = block_rsv_use_bytes(block_rsv, blocksize);
+       if (!ret)
                return block_rsv;
+
+       if (block_rsv->failfast)
+               return ERR_PTR(ret);
+
+       if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
+               global_updated = true;
+               update_global_block_rsv(root->fs_info);
+               goto again;
        }
 
-       ret = block_rsv_use_bytes(block_rsv, blocksize);
+       if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+               static DEFINE_RATELIMIT_STATE(_rs,
+                               DEFAULT_RATELIMIT_INTERVAL * 10,
+                               /*DEFAULT_RATELIMIT_BURST*/ 1);
+               if (__ratelimit(&_rs))
+                       WARN(1, KERN_DEBUG
+                               "btrfs: block rsv returned %d\n", ret);
+       }
+try_reserve:
+       ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+                                    BTRFS_RESERVE_NO_FLUSH);
        if (!ret)
                return block_rsv;
-       if (ret && !block_rsv->failfast) {
-               if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
-                       static DEFINE_RATELIMIT_STATE(_rs,
-                                       DEFAULT_RATELIMIT_INTERVAL * 10,
-                                       /*DEFAULT_RATELIMIT_BURST*/ 1);
-                       if (__ratelimit(&_rs))
-                               WARN(1, KERN_DEBUG
-                                       "btrfs: block rsv returned %d\n", ret);
-               }
-               ret = reserve_metadata_bytes(root, block_rsv, blocksize,
-                                            BTRFS_RESERVE_NO_FLUSH);
-               if (!ret) {
-                       return block_rsv;
-               } else if (ret && block_rsv != global_rsv) {
-                       ret = block_rsv_use_bytes(global_rsv, blocksize);
-                       if (!ret)
-                               return global_rsv;
-               }
+       /*
+        * If we couldn't reserve metadata bytes try and use some from
+        * the global reserve if its space type is the same as the global
+        * reservation.
+        */
+       if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
+           block_rsv->space_info == global_rsv->space_info) {
+               ret = block_rsv_use_bytes(global_rsv, blocksize);
+               if (!ret)
+                       return global_rsv;
        }
-
-       return ERR_PTR(-ENOSPC);
+       return ERR_PTR(ret);
 }
 
 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
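A minimal userspace sketch of the fallback order the rewritten use_block_rsv() above ends up with: take from the reservation the transaction owns, refresh the global reservation once and retry when that was the pool that ran dry, and only then borrow from the shared pool when both draw on the same space. The fresh reserve_metadata_bytes() attempt is omitted for brevity and every name here is invented; this is not btrfs code.

#include <stdbool.h>
#include <stdio.h>

struct pool { long reserved; int backing; };

static int pool_take(struct pool *p, long n)
{
	if (p->reserved < n)
		return -1;		/* stand-in for -ENOSPC */
	p->reserved -= n;
	return 0;
}

static long reserve_block(struct pool *own, struct pool *shared, long n)
{
	bool refreshed = false;
again:
	if (pool_take(own, n) == 0)
		return n;
	if (own == shared && !refreshed) {
		shared->reserved += n;	/* "update_global_block_rsv" stand-in */
		refreshed = true;
		goto again;
	}
	if (own != shared && own->backing == shared->backing &&
	    pool_take(shared, n) == 0)
		return n;
	return -1;
}

int main(void)
{
	struct pool shared = { .reserved = 4096, .backing = 0 };
	struct pool own = { .reserved = 0, .backing = 0 };

	printf("%ld\n", reserve_block(&own, &shared, 4096)); /* falls back to shared: 4096 */
	return 0;
}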
@@ -6763,6 +6767,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                        extent_op->update_key = 1;
                extent_op->update_flags = 1;
                extent_op->is_data = 0;
+               extent_op->level = level;
 
                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
                                        ins.objectid,
@@ -6934,7 +6939,8 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
-                                                 eb->len, flag, 0);
+                                                 eb->len, flag,
+                                                 btrfs_header_level(eb), 0);
                BUG_ON(ret); /* -ENOMEM */
                wc->flags[level] |= flag;
        }
index 32d67a822e93c0cf53f5c0409bd73ff863bf90bc..e7e7afb4a87268211e8b0ef881a6eeac0068eefd 100644 (file)
@@ -23,6 +23,7 @@
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
+static struct bio_set *btrfs_bioset;
 
 #ifdef CONFIG_BTRFS_DEBUG
 static LIST_HEAD(buffers);
@@ -125,10 +126,20 @@ int __init extent_io_init(void)
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
+
+       btrfs_bioset = bioset_create(BIO_POOL_SIZE,
+                                    offsetof(struct btrfs_io_bio, bio));
+       if (!btrfs_bioset)
+               goto free_buffer_cache;
        return 0;
 
+free_buffer_cache:
+       kmem_cache_destroy(extent_buffer_cache);
+       extent_buffer_cache = NULL;
+
 free_state_cache:
        kmem_cache_destroy(extent_state_cache);
+       extent_state_cache = NULL;
        return -ENOMEM;
 }
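The new free_buffer_cache label above follows the usual goto-unwind idiom: each later failure jumps to a label that releases everything set up before it, in reverse order, and the pointers are cleared so extent_io_exit() cannot free them a second time. A standalone sketch of the pattern with plain malloc/free and invented names:

#include <stdlib.h>

static char *state_cache, *buffer_cache, *bioset_demo;

int demo_init(void)
{
	state_cache = malloc(32);
	if (!state_cache)
		return -1;

	buffer_cache = malloc(32);
	if (!buffer_cache)
		goto free_state_cache;

	bioset_demo = malloc(32);
	if (!bioset_demo)
		goto free_buffer_cache;
	return 0;

free_buffer_cache:
	free(buffer_cache);
	buffer_cache = NULL;	/* a later exit path checks before freeing */
free_state_cache:
	free(state_cache);
	state_cache = NULL;
	return -1;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}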
 
@@ -145,6 +156,8 @@ void extent_io_exit(void)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
+       if (btrfs_bioset)
+               bioset_free(btrfs_bioset);
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
@@ -1947,28 +1960,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
                SetPageUptodate(page);
 }
 
-/*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
-static void check_page_locked(struct extent_io_tree *tree, struct page *page)
-{
-       u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
-       if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
-               unlock_page(page);
-}
-
-/*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
-static void check_page_writeback(struct extent_io_tree *tree,
-                                struct page *page)
-{
-       end_page_writeback(page);
-}
-
 /*
  * When IO fails, either with EIO or csum verification fails, we
  * try other mirrors that might have a good copy of the data.  This
@@ -2046,7 +2037,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
                return 0;
 
-       bio = bio_alloc(GFP_NOFS, 1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
        bio->bi_private = &compl;
@@ -2336,7 +2327,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
                return -EIO;
        }
 
-       bio = bio_alloc(GFP_NOFS, 1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                free_io_failure(inode, failrec, 0);
                return -EIO;
@@ -2398,19 +2389,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
-       int whole_page;
 
        do {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-               start = page_offset(page) + bvec->bv_offset;
-               end = start + bvec->bv_len - 1;
+               /* We always issue full-page reads, but if some block
+                * in a page fails to read, blk_update_request() will
+                * advance bv_offset and adjust bv_len to compensate.
+                * Print a warning for nonzero offsets, and an error
+                * if they don't add up to a full page.  */
+               if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+                       printk("%s page write in btrfs with offset %u and length %u\n",
+                              bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+                              ? KERN_ERR "partial" : KERN_INFO "incomplete",
+                              bvec->bv_offset, bvec->bv_len);
 
-               if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-                       whole_page = 1;
-               else
-                       whole_page = 0;
+               start = page_offset(page);
+               end = start + bvec->bv_offset + bvec->bv_len - 1;
 
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
@@ -2418,10 +2414,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                if (end_extent_writepage(page, err, start, end))
                        continue;
 
-               if (whole_page)
-                       end_page_writeback(page);
-               else
-                       check_page_writeback(tree, page);
+               end_page_writeback(page);
        } while (bvec >= bio->bi_io_vec);
 
        bio_put(bio);
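The warning block added to the write and read completion handlers reads more easily once the ternary is unpacked: nothing is printed for an untouched full-page bvec; when blk_update_request() advanced bv_offset but offset plus length still reaches the end of the page, the message is the informational "incomplete"; only when the two no longer add up to PAGE_CACHE_SIZE is it logged as a "partial" error. A small standalone illustration, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE_DEMO 4096u

static const char *classify(unsigned int off, unsigned int len)
{
	if (off == 0 && len == PAGE_SIZE_DEMO)
		return "whole page, no message";
	return (off + len != PAGE_SIZE_DEMO) ? "partial (KERN_ERR)"
					      : "incomplete (KERN_INFO)";
}

int main(void)
{
	printf("%s\n", classify(0, 4096));    /* whole page, no message */
	printf("%s\n", classify(512, 3584));  /* incomplete: 512 + 3584 == 4096 */
	printf("%s\n", classify(512, 1024));  /* partial: does not reach the page end */
	return 0;
}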
@@ -2446,7 +2439,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
-       int whole_page;
        int mirror;
        int ret;
 
@@ -2457,19 +2449,26 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                struct page *page = bvec->bv_page;
                struct extent_state *cached = NULL;
                struct extent_state *state;
+               struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 
                pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-                        "mirror=%ld\n", (u64)bio->bi_sector, err,
-                        (long int)bio->bi_bdev);
+                        "mirror=%lu\n", (u64)bio->bi_sector, err,
+                        io_bio->mirror_num);
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-               start = page_offset(page) + bvec->bv_offset;
-               end = start + bvec->bv_len - 1;
+               /* We always issue full-page reads, but if some block
+                * in a page fails to read, blk_update_request() will
+                * advance bv_offset and adjust bv_len to compensate.
+                * Print a warning for nonzero offsets, and an error
+                * if they don't add up to a full page.  */
+               if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+                       printk("%s page read in btrfs with offset %u and length %u\n",
+                              bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+                              ? KERN_ERR "partial" : KERN_INFO "incomplete",
+                              bvec->bv_offset, bvec->bv_len);
 
-               if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-                       whole_page = 1;
-               else
-                       whole_page = 0;
+               start = page_offset(page);
+               end = start + bvec->bv_offset + bvec->bv_len - 1;
 
                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);
@@ -2485,7 +2484,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                }
                spin_unlock(&tree->lock);
 
-               mirror = (int)(unsigned long)bio->bi_bdev;
+               mirror = io_bio->mirror_num;
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state, mirror);
@@ -2528,39 +2527,35 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                }
                unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
-               if (whole_page) {
-                       if (uptodate) {
-                               SetPageUptodate(page);
-                       } else {
-                               ClearPageUptodate(page);
-                               SetPageError(page);
-                       }
-                       unlock_page(page);
+               if (uptodate) {
+                       SetPageUptodate(page);
                } else {
-                       if (uptodate) {
-                               check_page_uptodate(tree, page);
-                       } else {
-                               ClearPageUptodate(page);
-                               SetPageError(page);
-                       }
-                       check_page_locked(tree, page);
+                       ClearPageUptodate(page);
+                       SetPageError(page);
                }
+               unlock_page(page);
        } while (bvec <= bvec_end);
 
        bio_put(bio);
 }
 
+/*
+ * this allocates from the btrfs_bioset.  We're returning a bio right now
+ * but you can call btrfs_io_bio for the appropriate container_of magic
+ */
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                gfp_t gfp_flags)
 {
        struct bio *bio;
 
-       bio = bio_alloc(gfp_flags, nr_vecs);
+       bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
 
        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-               while (!bio && (nr_vecs /= 2))
-                       bio = bio_alloc(gfp_flags, nr_vecs);
+               while (!bio && (nr_vecs /= 2)) {
+                       bio = bio_alloc_bioset(gfp_flags,
+                                              nr_vecs, btrfs_bioset);
+               }
        }
 
        if (bio) {
@@ -2571,6 +2566,19 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
        return bio;
 }
 
+struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
+{
+       return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
+}
+
+
+/* this also allocates from the btrfs_bioset */
+struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+       return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
+}
+
+
 static int __must_check submit_one_bio(int rw, struct bio *bio,
                                       int mirror_num, unsigned long bio_flags)
 {
@@ -3988,7 +3996,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                last_for_get_extent = isize;
        }
 
-       lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+       lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
                         &cached_state);
 
        em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -4075,7 +4083,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
        free_extent_map(em);
 out:
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
 }
index a2c03a17500971f131b8a43051b1269e86bfc0af..41fb81e7ec53c3fda80caf28a039af7b4c1a0682 100644 (file)
@@ -336,6 +336,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                gfp_t gfp_flags);
+struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
+struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
 
 struct btrfs_fs_info;
 
index ecca6c7375a610dbee385a71781cece9082935e0..e53009657f0e5b91b638f3d31d5e4d2cbf919f62 100644 (file)
@@ -197,30 +197,32 @@ int create_free_space_inode(struct btrfs_root *root,
                                         block_group->key.objectid);
 }
 
-int btrfs_truncate_free_space_cache(struct btrfs_root *root,
-                                   struct btrfs_trans_handle *trans,
-                                   struct btrfs_path *path,
-                                   struct inode *inode)
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+                                      struct btrfs_block_rsv *rsv)
 {
-       struct btrfs_block_rsv *rsv;
        u64 needed_bytes;
-       loff_t oldsize;
-       int ret = 0;
-
-       rsv = trans->block_rsv;
-       trans->block_rsv = &root->fs_info->global_block_rsv;
+       int ret;
 
        /* 1 for slack space, 1 for updating the inode */
        needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                btrfs_calc_trans_metadata_size(root, 1);
 
-       spin_lock(&trans->block_rsv->lock);
-       if (trans->block_rsv->reserved < needed_bytes) {
-               spin_unlock(&trans->block_rsv->lock);
-               trans->block_rsv = rsv;
-               return -ENOSPC;
-       }
-       spin_unlock(&trans->block_rsv->lock);
+       spin_lock(&rsv->lock);
+       if (rsv->reserved < needed_bytes)
+               ret = -ENOSPC;
+       else
+               ret = 0;
+       spin_unlock(&rsv->lock);
+       return ret;
+}
+
+int btrfs_truncate_free_space_cache(struct btrfs_root *root,
+                                   struct btrfs_trans_handle *trans,
+                                   struct btrfs_path *path,
+                                   struct inode *inode)
+{
+       loff_t oldsize;
+       int ret = 0;
 
        oldsize = i_size_read(inode);
        btrfs_i_size_write(inode, 0);
@@ -232,9 +234,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);
-
        if (ret) {
-               trans->block_rsv = rsv;
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }
@@ -242,7 +242,6 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
-       trans->block_rsv = rsv;
 
        return ret;
 }
@@ -920,10 +919,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
        /* Make sure we can fit our crcs into the first page */
        if (io_ctl.check_crcs &&
-           (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
-               WARN_ON(1);
+           (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
                goto out_nospc;
-       }
 
        io_ctl_set_generation(&io_ctl, trans->transid);
 
index 4dc17d8809c76d25cddb635e29bd8309b340f464..8b7f19f4496153886975547923cdcdc815940e96 100644 (file)
@@ -54,6 +54,8 @@ int create_free_space_inode(struct btrfs_root *root,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_path *path);
 
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+                                      struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
index d26f67a59e36f9caf3e91832fa1d4c943c8c9040..2c66ddbbe670e0ab80753dc41eb4054aacd00388 100644 (file)
@@ -429,11 +429,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
        num_bytes = trans->bytes_reserved;
        /*
         * 1 item for inode item insertion if need
-        * 3 items for inode item update (in the worst case)
+        * 4 items for inode item update (in the worst case)
+        * 1 item for slack space if we need to do truncation
         * 1 item for free space object
         * 3 items for pre-allocation
         */
-       trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
+       trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
        ret = btrfs_block_rsv_add(root, trans->block_rsv,
                                  trans->bytes_reserved,
                                  BTRFS_RESERVE_NO_FLUSH);
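For reference, the item counts in the updated comment add up as 1 + 4 + 1 + 1 + 3 = 10, which is exactly why the reservation below grows from btrfs_calc_trans_metadata_size(root, 8) to btrfs_calc_trans_metadata_size(root, 10).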
@@ -468,7 +469,8 @@ again:
        if (i_size_read(inode) > 0) {
                ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
                if (ret) {
-                       btrfs_abort_transaction(trans, root, ret);
+                       if (ret != -ENOSPC)
+                               btrfs_abort_transaction(trans, root, ret);
                        goto out_put;
                }
        }
index 9b31b3b091fceb6536612b30e64f2fd191d47e89..17f3064b4a3ebf7b65188be58b66713670f00ca9 100644 (file)
@@ -715,8 +715,10 @@ retry:
                                        async_extent->ram_size - 1, 0);
 
                em = alloc_extent_map();
-               if (!em)
+               if (!em) {
+                       ret = -ENOMEM;
                        goto out_free_reserve;
+               }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
@@ -923,8 +925,10 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
                }
 
                em = alloc_extent_map();
-               if (!em)
+               if (!em) {
+                       ret = -ENOMEM;
                        goto out_reserve;
+               }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
@@ -4724,6 +4728,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
 no_delete:
+       btrfs_remove_delayed_node(inode);
        clear_inode(inode);
        return;
 }
@@ -4839,14 +4844,13 @@ static void inode_tree_add(struct inode *inode)
        struct rb_node **p;
        struct rb_node *parent;
        u64 ino = btrfs_ino(inode);
-again:
-       p = &root->inode_tree.rb_node;
-       parent = NULL;
 
        if (inode_unhashed(inode))
                return;
-
+again:
+       parent = NULL;
        spin_lock(&root->inode_lock);
+       p = &root->inode_tree.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_inode, rb_node);
@@ -6928,7 +6932,11 @@ struct btrfs_dio_private {
        /* IO errors */
        int errors;
 
+       /* orig_bio is our btrfs_io_bio */
        struct bio *orig_bio;
+
+       /* dio_bio came from fs/direct-io.c */
+       struct bio *dio_bio;
 };
 
 static void btrfs_endio_direct_read(struct bio *bio, int err)
@@ -6938,6 +6946,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
        struct bio_vec *bvec = bio->bi_io_vec;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct bio *dio_bio;
        u64 start;
 
        start = dip->logical_offset;
@@ -6977,14 +6986,15 @@ failed:
 
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
                      dip->logical_offset + dip->bytes - 1);
-       bio->bi_private = dip->private;
+       dio_bio = dip->dio_bio;
 
        kfree(dip);
 
        /* If we had a csum failure make sure to clear the uptodate flag */
        if (err)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       dio_end_io(bio, err);
+               clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+       dio_end_io(dio_bio, err);
+       bio_put(bio);
 }
 
 static void btrfs_endio_direct_write(struct bio *bio, int err)
@@ -6995,6 +7005,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
        struct btrfs_ordered_extent *ordered = NULL;
        u64 ordered_offset = dip->logical_offset;
        u64 ordered_bytes = dip->bytes;
+       struct bio *dio_bio;
        int ret;
 
        if (err)
@@ -7022,14 +7033,15 @@ out_test:
                goto again;
        }
 out_done:
-       bio->bi_private = dip->private;
+       dio_bio = dip->dio_bio;
 
        kfree(dip);
 
        /* If we had an error make sure to clear the uptodate flag */
        if (err)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       dio_end_io(bio, err);
+               clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+       dio_end_io(dio_bio, err);
+       bio_put(bio);
 }
 
 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
@@ -7065,10 +7077,10 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
        if (!atomic_dec_and_test(&dip->pending_bios))
                goto out;
 
-       if (dip->errors)
+       if (dip->errors) {
                bio_io_error(dip->orig_bio);
-       else {
-               set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
+       } else {
+               set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
                bio_endio(dip->orig_bio, 0);
        }
 out:
@@ -7243,25 +7255,34 @@ out_err:
        return 0;
 }
 
-static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
-                               loff_t file_offset)
+static void btrfs_submit_direct(int rw, struct bio *dio_bio,
+                               struct inode *inode, loff_t file_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_dio_private *dip;
-       struct bio_vec *bvec = bio->bi_io_vec;
+       struct bio_vec *bvec = dio_bio->bi_io_vec;
+       struct bio *io_bio;
        int skip_sum;
        int write = rw & REQ_WRITE;
        int ret = 0;
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
+       io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
+
+       if (!io_bio) {
+               ret = -ENOMEM;
+               goto free_ordered;
+       }
+
        dip = kmalloc(sizeof(*dip), GFP_NOFS);
        if (!dip) {
                ret = -ENOMEM;
-               goto free_ordered;
+               goto free_io_bio;
        }
 
-       dip->private = bio->bi_private;
+       dip->private = dio_bio->bi_private;
+       io_bio->bi_private = dio_bio->bi_private;
        dip->inode = inode;
        dip->logical_offset = file_offset;
 
@@ -7269,22 +7290,27 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
        do {
                dip->bytes += bvec->bv_len;
                bvec++;
-       } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
+       } while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1));
 
-       dip->disk_bytenr = (u64)bio->bi_sector << 9;
-       bio->bi_private = dip;
+       dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+       io_bio->bi_private = dip;
        dip->errors = 0;
-       dip->orig_bio = bio;
+       dip->orig_bio = io_bio;
+       dip->dio_bio = dio_bio;
        atomic_set(&dip->pending_bios, 0);
 
        if (write)
-               bio->bi_end_io = btrfs_endio_direct_write;
+               io_bio->bi_end_io = btrfs_endio_direct_write;
        else
-               bio->bi_end_io = btrfs_endio_direct_read;
+               io_bio->bi_end_io = btrfs_endio_direct_read;
 
        ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
        if (!ret)
                return;
+
+free_io_bio:
+       bio_put(io_bio);
+
 free_ordered:
        /*
         * If this is a write, we need to clean up the reserved space and kill
@@ -7300,7 +7326,7 @@ free_ordered:
                btrfs_put_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
-       bio_endio(bio, ret);
+       bio_endio(dio_bio, ret);
 }
 
 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
@@ -7979,7 +8005,6 @@ void btrfs_destroy_inode(struct inode *inode)
        inode_tree_del(inode);
        btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
-       btrfs_remove_delayed_node(inode);
        call_rcu(&inode->i_rcu, btrfs_i_callback);
 }
 
@@ -7987,6 +8012,9 @@ int btrfs_drop_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
+       if (root == NULL)
+               return 1;
+
        /* the snap/subvol tree is on deleting */
        if (btrfs_root_refs(&root->root_item) == 0 &&
            root != root->fs_info->tree_root)
index 0de4a2fcfb24f1cdd2e8ae9574405bd4ce247526..0f81d67cdc8da651890df1a89c01df9188667d97 100644 (file)
@@ -1801,7 +1801,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
                item_off = btrfs_item_ptr_offset(leaf, i);
                item_len = btrfs_item_size_nr(leaf, i);
 
-               if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+               btrfs_item_key_to_cpu(leaf, key, i);
+               if (!key_in_sk(key, sk))
+                       continue;
+
+               if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
                        item_len = 0;
 
                if (sizeof(sh) + item_len + *sk_offset >
@@ -1810,10 +1814,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
                        goto overflow;
                }
 
-               btrfs_item_key_to_cpu(leaf, key, i);
-               if (!key_in_sk(key, sk))
-                       continue;
-
                sh.objectid = key->objectid;
                sh.offset = key->offset;
                sh.type = key->type;
index 0740621daf6ca370498e78688922347c54e172df..0525e1389f5b16658ccea028da6408da812c974b 100644 (file)
@@ -1050,7 +1050,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        }
 
        /* put a new bio on the list */
-       bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
        if (!bio)
                return -ENOMEM;
 
index 704a1b8d2a2bae870fcf678d27aa4601f6745011..4febca4fc2de7fe79fb9239a5e44f9ce3ad4eba0 100644 (file)
@@ -1773,7 +1773,7 @@ again:
                        if (!eb || !extent_buffer_uptodate(eb)) {
                                ret = (!eb) ? -ENOMEM : -EIO;
                                free_extent_buffer(eb);
-                               return ret;
+                               break;
                        }
                        btrfs_tree_lock(eb);
                        if (cow) {
@@ -3350,6 +3350,11 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
        }
 
 truncate:
+       ret = btrfs_check_trunc_cache_free_space(root,
+                                                &fs_info->global_block_rsv);
+       if (ret)
+               goto out;
+
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
@@ -4077,7 +4082,7 @@ out:
        return inode;
 }
 
-static struct reloc_control *alloc_reloc_control(void)
+static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
 {
        struct reloc_control *rc;
 
@@ -4088,7 +4093,8 @@ static struct reloc_control *alloc_reloc_control(void)
        INIT_LIST_HEAD(&rc->reloc_roots);
        backref_cache_init(&rc->backref_cache);
        mapping_tree_init(&rc->reloc_root_tree);
-       extent_io_tree_init(&rc->processed_blocks, NULL);
+       extent_io_tree_init(&rc->processed_blocks,
+                           fs_info->btree_inode->i_mapping);
        return rc;
 }
 
@@ -4105,7 +4111,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
        int rw = 0;
        int err = 0;
 
-       rc = alloc_reloc_control();
+       rc = alloc_reloc_control(fs_info);
        if (!rc)
                return -ENOMEM;
 
@@ -4306,7 +4312,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
        if (list_empty(&reloc_roots))
                goto out;
 
-       rc = alloc_reloc_control();
+       rc = alloc_reloc_control(root->fs_info);
        if (!rc) {
                err = -ENOMEM;
                goto out;
index f489e24659a434fa2b67984a5a8a7cf9df2b06b6..79bd479317cb53cbea30a7021d81a891ed9ae20c 100644 (file)
@@ -1296,7 +1296,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                }
 
                WARN_ON(!page->page);
-               bio = bio_alloc(GFP_NOFS, 1);
+               bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
                if (!bio) {
                        page->io_error = 1;
                        sblock->no_io_error_seen = 0;
@@ -1431,7 +1431,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                        return -EIO;
                }
 
-               bio = bio_alloc(GFP_NOFS, 1);
+               bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
@@ -1522,7 +1522,7 @@ again:
                sbio->dev = wr_ctx->tgtdev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+                       bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
                        if (!bio) {
                                mutex_unlock(&wr_ctx->wr_lock);
                                return -ENOMEM;
@@ -1930,7 +1930,7 @@ again:
                sbio->dev = spage->dev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+                       bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
                        if (!bio)
                                return -ENOMEM;
                        sbio->bio = bio;
@@ -3307,7 +3307,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
                        "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
                return -EIO;
        }
-       bio = bio_alloc(GFP_NOFS, 1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
index a4807ced23cc5791d500d28ee9f91fdc915f5df0..f0857e092a3cb1af485604850052440579a1bbf1 100644 (file)
@@ -1263,6 +1263,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 
                btrfs_dev_replace_suspend_for_unmount(fs_info);
                btrfs_scrub_cancel(fs_info);
+               btrfs_pause_balance(fs_info);
 
                ret = btrfs_commit_super(root);
                if (ret)
index 0e925ced971ba87bc0e356c6a2c7e3dbf1b5e91e..8bffb9174afba04d8375b96f754256b68ff9b4ef 100644 (file)
@@ -3120,14 +3120,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
        allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
        if (num_devices == 1)
                allowed |= BTRFS_BLOCK_GROUP_DUP;
-       else if (num_devices < 4)
+       else if (num_devices > 1)
                allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
-       else
-               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
-                               BTRFS_BLOCK_GROUP_RAID10 |
-                               BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6);
-
+       if (num_devices > 2)
+               allowed |= BTRFS_BLOCK_GROUP_RAID5;
+       if (num_devices > 3)
+               allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
+                           BTRFS_BLOCK_GROUP_RAID6);
        if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
            (!alloc_profile_is_valid(bctl->data.target, 1) ||
             (bctl->data.target & ~allowed))) {
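Reading the new cumulative checks for a few device counts: a single device permits only single and DUP conversion targets; two devices add RAID0 and RAID1; a third device additionally allows RAID5; RAID10 and RAID6 only become acceptable targets once at least four devices are present.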
@@ -5019,42 +5018,16 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
-static void *merge_stripe_index_into_bio_private(void *bi_private,
-                                                unsigned int stripe_index)
-{
-       /*
-        * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
-        * at most 1.
-        * The alternative solution (instead of stealing bits from the
-        * pointer) would be to allocate an intermediate structure
-        * that contains the old private pointer plus the stripe_index.
-        */
-       BUG_ON((((uintptr_t)bi_private) & 3) != 0);
-       BUG_ON(stripe_index > 3);
-       return (void *)(((uintptr_t)bi_private) | stripe_index);
-}
-
-static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
-{
-       return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
-}
-
-static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
-{
-       return (unsigned int)((uintptr_t)bi_private) & 3;
-}
-
 static void btrfs_end_bio(struct bio *bio, int err)
 {
-       struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
+       struct btrfs_bio *bbio = bio->bi_private;
        int is_orig_bio = 0;
 
        if (err) {
                atomic_inc(&bbio->error);
                if (err == -EIO || err == -EREMOTEIO) {
                        unsigned int stripe_index =
-                               extract_stripe_index_from_bio_private(
-                                       bio->bi_private);
+                               btrfs_io_bio(bio)->stripe_index;
                        struct btrfs_device *dev;
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
@@ -5084,8 +5057,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                }
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
-               bio->bi_bdev = (struct block_device *)
-                                       (unsigned long)bbio->mirror_num;
+               btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                /* only send an error to the higher layers if it is
                 * beyond the tolerance of the btrfs bio
                 */
@@ -5211,8 +5183,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
        struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
 
        bio->bi_private = bbio;
-       bio->bi_private = merge_stripe_index_into_bio_private(
-                       bio->bi_private, (unsigned int)dev_nr);
+       btrfs_io_bio(bio)->stripe_index = dev_nr;
        bio->bi_end_io = btrfs_end_bio;
        bio->bi_sector = physical >> 9;
 #ifdef DEBUG
@@ -5273,8 +5244,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
        if (atomic_dec_and_test(&bbio->stripes_pending)) {
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
-               bio->bi_bdev = (struct block_device *)
-                       (unsigned long)bbio->mirror_num;
+               btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                bio->bi_sector = logical >> 9;
                kfree(bbio);
                bio_endio(bio, -EIO);
@@ -5352,7 +5322,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                }
 
                if (dev_nr < total_devs - 1) {
-                       bio = bio_clone(first_bio, GFP_NOFS);
+                       bio = btrfs_bio_clone(first_bio, GFP_NOFS);
                        BUG_ON(!bio); /* -ENOMEM */
                } else {
                        bio = first_bio;
index 845ccbb0d2e35e9ad5b92101102b7e853d800ab0..f6247e2a47f7b643d8c88f2ee4c5b53f5c79f043 100644 (file)
@@ -152,6 +152,26 @@ struct btrfs_fs_devices {
        int rotating;
 };
 
+/*
+ * we need the mirror number and stripe index to be passed around
+ * the call chain while we are processing end_io (especially errors).
+ * Really, what we need is a btrfs_bio structure that has this info
+ * and is properly sized with its stripe array, but we're not there
+ * quite yet.  We have our own btrfs bioset, and all of the bios
+ * we allocate are actually btrfs_io_bios.  We'll cram as much of
+ * struct btrfs_bio as we can into this over time.
+ */
+struct btrfs_io_bio {
+       unsigned long mirror_num;
+       unsigned long stripe_index;
+       struct bio bio;
+};
+
+static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
+{
+       return container_of(bio, struct btrfs_io_bio, bio);
+}
+
 struct btrfs_bio_stripe {
        struct btrfs_device *dev;
        u64 physical;
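The comment above is the key to the bio changes throughout this series: every bio btrfs now allocates (btrfs_bio_alloc, btrfs_io_bio_alloc and btrfs_bio_clone, all backed by btrfs_bioset with a front pad of offsetof(struct btrfs_io_bio, bio)) sits inside a btrfs_io_bio, so the completion handlers can recover mirror_num and stripe_index with container_of instead of stuffing them into bi_bdev or the low pointer bits as the deleted helpers in volumes.c did. A userspace sketch of that embed-and-recover idiom, with invented demo_* names and a hand-rolled container_of:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_bio { int sector; };

struct demo_io_bio {
	unsigned long mirror_num;
	unsigned long stripe_index;
	struct demo_bio bio;	/* embedded member, last as in btrfs_io_bio */
};

int main(void)
{
	struct demo_io_bio *wrapper = calloc(1, sizeof(*wrapper));
	struct demo_bio *bio;

	if (!wrapper)
		return 1;
	wrapper->mirror_num = 2;

	bio = &wrapper->bio;	/* what the rest of the code gets to see */

	/* completion path: map the plain bio pointer back to its wrapper */
	printf("mirror=%lu\n",
	       container_of_demo(bio, struct demo_io_bio, bio)->mirror_num);
	free(wrapper);
	return 0;
}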
index 202dd3d68be02f608c0998ab1272cccf91249769..ebbf680378e2ad3ad956babeb5b521e619830bcb 100644 (file)
@@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 }
 
 /**
- * Encode the flock and fcntl locks for the given inode into the pagelist.
- * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
- * sequential flock locks.
- * Must be called with lock_flocks() already held.
- * If we encounter more of a specific lock type than expected,
- * we return the value 1.
+ * Encode the flock and fcntl locks for the given inode into the ceph_filelock
+ * array. Must be called with lock_flocks() already held.
+ * If we encounter more of a specific lock type than expected, return -ENOSPC.
  */
-int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
-                     int num_fcntl_locks, int num_flock_locks)
+int ceph_encode_locks_to_buffer(struct inode *inode,
+                               struct ceph_filelock *flocks,
+                               int num_fcntl_locks, int num_flock_locks)
 {
        struct file_lock *lock;
-       struct ceph_filelock cephlock;
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
+       int l = 0;
 
        dout("encoding %d flock and %d fcntl locks", num_flock_locks,
             num_fcntl_locks);
-       err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
-       if (err)
-               goto fail;
+
        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
                if (lock->fl_flags & FL_POSIX) {
                        ++seen_fcntl;
@@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                                err = -ENOSPC;
                                goto fail;
                        }
-                       err = lock_to_ceph_filelock(lock, &cephlock);
+                       err = lock_to_ceph_filelock(lock, &flocks[l]);
                        if (err)
                                goto fail;
-                       err = ceph_pagelist_append(pagelist, &cephlock,
-                                          sizeof(struct ceph_filelock));
+                       ++l;
                }
-               if (err)
-                       goto fail;
        }
-
-       err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
-       if (err)
-               goto fail;
        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
                if (lock->fl_flags & FL_FLOCK) {
                        ++seen_flock;
@@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                                err = -ENOSPC;
                                goto fail;
                        }
-                       err = lock_to_ceph_filelock(lock, &cephlock);
+                       err = lock_to_ceph_filelock(lock, &flocks[l]);
                        if (err)
                                goto fail;
-                       err = ceph_pagelist_append(pagelist, &cephlock,
-                                          sizeof(struct ceph_filelock));
+                       ++l;
                }
-               if (err)
-                       goto fail;
        }
 fail:
        return err;
 }
 
+/**
+ * Copy the encoded flock and fcntl locks into the pagelist.
+ * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
+ * sequential flock locks.
+ * Returns zero on success.
+ */
+int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+                          struct ceph_pagelist *pagelist,
+                          int num_fcntl_locks, int num_flock_locks)
+{
+       int err = 0;
+       __le32 nlocks;
+
+       nlocks = cpu_to_le32(num_fcntl_locks);
+       err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+       if (err)
+               goto out_fail;
+
+       err = ceph_pagelist_append(pagelist, flocks,
+                                  num_fcntl_locks * sizeof(*flocks));
+       if (err)
+               goto out_fail;
+
+       nlocks = cpu_to_le32(num_flock_locks);
+       err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
+       if (err)
+               goto out_fail;
+
+       err = ceph_pagelist_append(pagelist,
+                                  &flocks[num_fcntl_locks],
+                                  num_flock_locks * sizeof(*flocks));
+out_fail:
+       return err;
+}
+
 /*
  * Given a pointer to a lock, convert it to a ceph filelock
  */
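Splitting the old ceph_encode_locks() in two keeps the wire format unchanged: a little-endian u32 count of fcntl locks, that many lock records, then a u32 count of flock locks and those records, exactly as the pagelist helper above lays them out. A standalone sketch of that layout with an invented fixed-size record; the cpu_to_le32 conversion is elided, so this assumes a little-endian host and is not ceph code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_filelock { uint64_t start, length; uint32_t pid; uint8_t type; };

static size_t pack_locks(uint8_t *dst, const struct demo_filelock *locks,
			 uint32_t n_fcntl, uint32_t n_flock)
{
	uint8_t *p = dst;

	memcpy(p, &n_fcntl, sizeof(n_fcntl));          p += sizeof(n_fcntl);
	memcpy(p, locks, n_fcntl * sizeof(*locks));    p += n_fcntl * sizeof(*locks);
	memcpy(p, &n_flock, sizeof(n_flock));          p += sizeof(n_flock);
	memcpy(p, locks + n_fcntl, n_flock * sizeof(*locks));
	p += n_flock * sizeof(*locks);
	return (size_t)(p - dst);
}

int main(void)
{
	/* first two entries play the fcntl locks, the last one a flock lock */
	struct demo_filelock locks[3] = { { 0, 10, 1, 0 }, { 20, 5, 1, 1 }, { 0, 0, 2, 2 } };
	uint8_t buf[256];

	printf("packed %zu bytes\n", pack_locks(buf, locks, 2, 1));
	return 0;
}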
index 4f22671a5bd4ce1e8c2c0ea06d84b31e48524775..4d2920304be8e7e9075fd1f1b80b954cb26718f9 100644 (file)
@@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        if (recon_state->flock) {
                int num_fcntl_locks, num_flock_locks;
-               struct ceph_pagelist_cursor trunc_point;
-
-               ceph_pagelist_set_cursor(pagelist, &trunc_point);
-               do {
-                       lock_flocks();
-                       ceph_count_locks(inode, &num_fcntl_locks,
-                                        &num_flock_locks);
-                       rec.v2.flock_len = (2*sizeof(u32) +
-                                           (num_fcntl_locks+num_flock_locks) *
-                                           sizeof(struct ceph_filelock));
-                       unlock_flocks();
-
-                       /* pre-alloc pagelist */
-                       ceph_pagelist_truncate(pagelist, &trunc_point);
-                       err = ceph_pagelist_append(pagelist, &rec, reclen);
-                       if (!err)
-                               err = ceph_pagelist_reserve(pagelist,
-                                                           rec.v2.flock_len);
-
-                       /* encode locks */
-                       if (!err) {
-                               lock_flocks();
-                               err = ceph_encode_locks(inode,
-                                                       pagelist,
-                                                       num_fcntl_locks,
-                                                       num_flock_locks);
-                               unlock_flocks();
-                       }
-               } while (err == -ENOSPC);
+               struct ceph_filelock *flocks;
+
+encode_again:
+               lock_flocks();
+               ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+               unlock_flocks();
+               flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
+                                sizeof(struct ceph_filelock), GFP_NOFS);
+               if (!flocks) {
+                       err = -ENOMEM;
+                       goto out_free;
+               }
+               lock_flocks();
+               err = ceph_encode_locks_to_buffer(inode, flocks,
+                                                 num_fcntl_locks,
+                                                 num_flock_locks);
+               unlock_flocks();
+               if (err) {
+                       kfree(flocks);
+                       if (err == -ENOSPC)
+                               goto encode_again;
+                       goto out_free;
+               }
+               /*
+                * number of encoded locks is stable, so copy to pagelist
+                */
+               rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
+                                   (num_fcntl_locks+num_flock_locks) *
+                                   sizeof(struct ceph_filelock));
+               err = ceph_pagelist_append(pagelist, &rec, reclen);
+               if (!err)
+                       err = ceph_locks_to_pagelist(flocks, pagelist,
+                                                    num_fcntl_locks,
+                                                    num_flock_locks);
+               kfree(flocks);
        } else {
                err = ceph_pagelist_append(pagelist, &rec, reclen);
        }
-
 out_free:
        kfree(path);
 out_dput:
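The encode_again loop above is a count, allocate, re-encode pattern: count the locks under lock_flocks(), drop the lock to kmalloc an array of that size, then re-take the lock and fill the array. If, say, two fcntl locks were counted but a third shows up before the second pass, ceph_encode_locks_to_buffer() returns -ENOSPC and the whole sequence restarts with a fresh count; only once the encoded set is stable does it get appended to the pagelist, which is what the "number of encoded locks is stable" comment is promising.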
index 8696be2ff6799690673932b9fac5086b37a10c24..7ccfdb4aea2e008e63f7ba00ab080c0fda2d0d09 100644 (file)
@@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops;
 extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
 extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
 extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
-extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p,
-                            int p_locks, int f_locks);
+extern int ceph_encode_locks_to_buffer(struct inode *inode,
+                                      struct ceph_filelock *flocks,
+                                      int num_fcntl_locks,
+                                      int num_flock_locks);
+extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
+                                 struct ceph_pagelist *pagelist,
+                                 int num_fcntl_locks, int num_flock_locks);
 extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
 
 /* debugfs.c */
index 8e33ec65847b1989749a4b1b72e239fa0c64d94a..58df174deb10e5b354a9003ea3ccbfc5240e5cd0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/vfs.h>
 #include <linux/fs.h>
+#include <linux/inet.h>
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifsfs.h"
@@ -48,58 +49,74 @@ void cifs_dfs_release_automount_timer(void)
 }
 
 /**
- * cifs_get_share_name -       extracts share name from UNC
- * @node_name: pointer to UNC string
+ * cifs_build_devname - build a devicename from a UNC and optional prepath
+ * @nodename:  pointer to UNC string
+ * @prepath:   pointer to prefixpath (or NULL if there isn't one)
  *
- * Extracts sharename form full UNC.
- * i.e. strips from UNC trailing path that is not part of share
- * name and fixup missing '\' in the beginning of DFS node refferal
- * if necessary.
- * Returns pointer to share name on success or ERR_PTR on error.
- * Caller is responsible for freeing returned string.
+ * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer
+ * big enough to hold the final thing. Copy the UNC from the nodename, and
+ * concatenate the prepath onto the end of it if there is one.
+ *
+ * Returns pointer to the built string, or an ERR_PTR. Caller is responsible
+ * for freeing the returned string.
  */
-static char *cifs_get_share_name(const char *node_name)
+static char *
+cifs_build_devname(char *nodename, const char *prepath)
 {
-       int len;
-       char *UNC;
-       char *pSep;
-
-       len = strlen(node_name);
-       UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
-                        GFP_KERNEL);
-       if (!UNC)
-               return ERR_PTR(-ENOMEM);
+       size_t pplen;
+       size_t unclen;
+       char *dev;
+       char *pos;
+
+       /* skip over any preceding delimiters */
+       nodename += strspn(nodename, "\\");
+       if (!*nodename)
+               return ERR_PTR(-EINVAL);
 
-       /* get share name and server name */
-       if (node_name[1] != '\\') {
-               UNC[0] = '\\';
-               strncpy(UNC+1, node_name, len);
-               len++;
-               UNC[len] = 0;
-       } else {
-               strncpy(UNC, node_name, len);
-               UNC[len] = 0;
-       }
+       /* get length of UNC and set pos to last char */
+       unclen = strlen(nodename);
+       pos = nodename + unclen - 1;
 
-       /* find server name end */
-       pSep = memchr(UNC+2, '\\', len-2);
-       if (!pSep) {
-               cifs_dbg(VFS, "%s: no server name end in node name: %s\n",
-                        __func__, node_name);
-               kfree(UNC);
-               return ERR_PTR(-EINVAL);
+       /* trim off any trailing delimiters */
+       while (*pos == '\\') {
+               --pos;
+               --unclen;
        }
 
-       /* find sharename end */
-       pSep++;
-       pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC));
-       if (pSep) {
-               /* trim path up to sharename end
-                * now we have share name in UNC */
-               *pSep = 0;
+       /* allocate a buffer:
+        * +2 for preceding "//"
+        * +1 for delimiter between UNC and prepath
+        * +1 for trailing NULL
+        */
+       pplen = prepath ? strlen(prepath) : 0;
+       dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL);
+       if (!dev)
+               return ERR_PTR(-ENOMEM);
+
+       pos = dev;
+       /* add the initial "//" */
+       *pos = '/';
+       ++pos;
+       *pos = '/';
+       ++pos;
+
+       /* copy in the UNC portion from referral */
+       memcpy(pos, nodename, unclen);
+       pos += unclen;
+
+       /* copy the prefixpath remainder (if there is one) */
+       if (pplen) {
+               *pos = '/';
+               ++pos;
+               memcpy(pos, prepath, pplen);
+               pos += pplen;
        }
 
-       return UNC;
+       /* NULL terminator */
+       *pos = '\0';
+
+       convert_delimiter(dev, '/');
+       return dev;
 }
 
 
@@ -123,6 +140,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
 {
        int rc;
        char *mountdata = NULL;
+       const char *prepath = NULL;
        int md_len;
        char *tkn_e;
        char *srvIP = NULL;
@@ -132,7 +150,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
        if (sb_mountdata == NULL)
                return ERR_PTR(-EINVAL);
 
-       *devname = cifs_get_share_name(ref->node_name);
+       if (strlen(fullpath) - ref->path_consumed)
+               prepath = fullpath + ref->path_consumed;
+
+       *devname = cifs_build_devname(ref->node_name, prepath);
        if (IS_ERR(*devname)) {
                rc = PTR_ERR(*devname);
                *devname = NULL;
@@ -146,12 +167,14 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
                goto compose_mount_options_err;
        }
 
-       /* md_len = strlen(...) + 12 for 'sep+prefixpath='
-        * assuming that we have 'unc=' and 'ip=' in
-        * the original sb_mountdata
+       /*
+        * In most cases, we'll be building a shorter string than the original,
+        * but we do have to assume that the address in the ip= option may be
+        * much longer than the original. Add the max length of an address
+        * string to the length of the original string to allow for worst case.
         */
-       md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
-       mountdata = kzalloc(md_len+1, GFP_KERNEL);
+       md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
+       mountdata = kzalloc(md_len + 1, GFP_KERNEL);
        if (mountdata == NULL) {
                rc = -ENOMEM;
                goto compose_mount_options_err;
@@ -195,26 +218,6 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
                strncat(mountdata, &sep, 1);
        strcat(mountdata, "ip=");
        strcat(mountdata, srvIP);
-       strncat(mountdata, &sep, 1);
-       strcat(mountdata, "unc=");
-       strcat(mountdata, *devname);
-
-       /* find & copy prefixpath */
-       tkn_e = strchr(ref->node_name + 2, '\\');
-       if (tkn_e == NULL) {
-               /* invalid unc, missing share name*/
-               rc = -EINVAL;
-               goto compose_mount_options_err;
-       }
-
-       tkn_e = strchr(tkn_e + 1, '\\');
-       if (tkn_e || (strlen(fullpath) - ref->path_consumed)) {
-               strncat(mountdata, &sep, 1);
-               strcat(mountdata, "prefixpath=");
-               if (tkn_e)
-                       strcat(mountdata, tkn_e + 1);
-               strcat(mountdata, fullpath + ref->path_consumed);
-       }
 
        /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
        /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
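
A minimal userspace sketch of the string assembly the new cifs_build_devname() performs: skip leading '\' in the referral UNC, trim trailing ones, prefix "//", append the optional prepath, then convert the remaining '\' delimiters to '/'. The helper name build_devname() and the sample inputs are hypothetical; error handling is simplified and this is not the kernel function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_devname(const char *nodename, const char *prepath)
{
	size_t pplen = prepath ? strlen(prepath) : 0;
	size_t unclen;
	char *dev, *pos;

	nodename += strspn(nodename, "\\");		/* skip leading '\' */
	if (!*nodename)
		return NULL;

	unclen = strlen(nodename);
	while (unclen && nodename[unclen - 1] == '\\')	/* trim trailing '\' */
		unclen--;

	/* 2 for "//", 1 for the optional separator, 1 for the terminator */
	dev = malloc(2 + unclen + 1 + pplen + 1);
	if (!dev)
		return NULL;

	pos = dev;
	*pos++ = '/';
	*pos++ = '/';
	memcpy(pos, nodename, unclen);
	pos += unclen;
	if (pplen) {
		*pos++ = '/';
		memcpy(pos, prepath, pplen);
		pos += pplen;
	}
	*pos = '\0';

	for (pos = dev; *pos; pos++)			/* convert_delimiter() equivalent */
		if (*pos == '\\')
			*pos = '/';
	return dev;
}

int main(void)
{
	char *dev = build_devname("\\\\server\\share\\", "sub/dir");

	printf("%s\n", dev ? dev : "(error)");		/* //server/share/sub/dir */
	free(dev);
	return 0;
}

Building the devname with forward slashes up front is what lets the later hunks drop the unc= and prefixpath= mount options and hand a '/'-delimited UNC to the resolver.
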
index 72e4efee138986e65f376496eca41016b55570b2..3752b9f6d9e46e90876b18f4d14527f9262baff6 100644 (file)
@@ -372,9 +372,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
        cifs_show_security(s, tcon->ses->server);
        cifs_show_cache_flavor(s, cifs_sb);
 
-       seq_printf(s, ",unc=");
-       seq_escape(s, tcon->treeName, " \t\n\\");
-
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
                seq_printf(s, ",multiuser");
        else if (tcon->ses->user_name)
index 99eeaa17ee006956d37350cad00bfd854c8b5306..e3bc39bb9d12b224d7cda96e2e55b9babed68c0b 100644 (file)
@@ -1061,6 +1061,7 @@ static int cifs_parse_security_flavors(char *value,
 #endif
        case Opt_sec_none:
                vol->nullauth = 1;
+               vol->secFlg |= CIFSSEC_MAY_NTLM;
                break;
        default:
                cifs_dbg(VFS, "bad security option: %s\n", value);
@@ -1257,14 +1258,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
        vol->backupuid_specified = false; /* no backup intent for a user */
        vol->backupgid_specified = false; /* no backup intent for a group */
 
-       /*
-        * For now, we ignore -EINVAL errors under the assumption that the
-        * unc= and prefixpath= options will be usable.
-        */
-       if (cifs_parse_devname(devname, vol) == -ENOMEM) {
-               printk(KERN_ERR "CIFS: Unable to allocate memory to parse "
-                               "device string.\n");
-               goto out_nomem;
+       switch (cifs_parse_devname(devname, vol)) {
+       case 0:
+               break;
+       case -ENOMEM:
+               cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
+               goto cifs_parse_mount_err;
+       case -EINVAL:
+               cifs_dbg(VFS, "Malformed UNC in devname.\n");
+               goto cifs_parse_mount_err;
+       default:
+               cifs_dbg(VFS, "Unknown error parsing devname.\n");
+               goto cifs_parse_mount_err;
        }
 
        while ((data = strsep(&options, separator)) != NULL) {
@@ -1826,7 +1831,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
        }
 #endif
        if (!vol->UNC) {
-               cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string or in unc= option!\n");
+               cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n");
                goto cifs_parse_mount_err;
        }
 
@@ -3274,8 +3279,8 @@ build_unc_path_to_root(const struct smb_vol *vol,
        pos = full_path + unc_len;
 
        if (pplen) {
-               *pos++ = CIFS_DIR_SEP(cifs_sb);
-               strncpy(pos, vol->prepath, pplen);
+               *pos = CIFS_DIR_SEP(cifs_sb);
+               strncpy(pos + 1, vol->prepath, pplen);
                pos += pplen;
        }
 
index e7512e497611fdcbed49d2be533ae4e97f7250fe..7ede7306599f47449bdd02533db47bfede2c81df 100644 (file)
@@ -34,7 +34,7 @@
 
 /**
  * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
- * @unc: UNC path specifying the server
+ * @unc: UNC path specifying the server (with '/' as delimiter)
  * @ip_addr: Where to return the IP address.
  *
  * The IP address will be returned in string form, and the caller is
@@ -64,7 +64,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
        hostname = unc + 2;
 
        /* Search for server name delimiter */
-       sep = memchr(hostname, '\\', len);
+       sep = memchr(hostname, '/', len);
        if (sep)
                len = sep - hostname;
        else
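
With the devname now '/'-delimited, the resolver finds the server name as the run of characters between the leading "//" and the next '/'. A tiny standalone sketch of that extraction (illustrative hostname, not the kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *unc = "//fileserver.example.com/share/dir";
	const char *hostname = unc + 2;
	const char *sep = strchr(hostname, '/');
	size_t len = sep ? (size_t)(sep - hostname) : strlen(hostname);

	printf("%.*s\n", (int)len, hostname);	/* fileserver.example.com */
	return 0;
}
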
index fc3025199cb336f6c07c8172d19e501601a3f1f7..20efd81266c643338bcd81b434edfe9fbcacc0ac 100644 (file)
@@ -171,7 +171,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
 
        if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
                inode->i_flags |= S_AUTOMOUNT;
-       cifs_set_ops(inode);
+       if (inode->i_state & I_NEW)
+               cifs_set_ops(inode);
 }
 
 void
index 201f0a0d6b0a2a2be8a2983d2fb63ef27dacc0d8..a7abbea2c09638ef8c190555ec466834c0c06edf 100644 (file)
@@ -295,6 +295,12 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
 static int
 ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
+       int rc;
+
+       rc = filemap_write_and_wait(file->f_mapping);
+       if (rc)
+               return rc;
+
        return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
 }
 
index bfb531564319608be19b63885142eb6b5c7648d9..8dd524f322847b346e119c6d5c5b3693de6fb8bc 100644 (file)
@@ -44,8 +44,11 @@ static ssize_t efivarfs_file_write(struct file *file,
 
        bytes = efivar_entry_set_get_size(var, attributes, &datasize,
                                          data, &set);
-       if (!set && bytes)
+       if (!set && bytes) {
+               if (bytes == -ENOENT)
+                       bytes = -EIO;
                goto out;
+       }
 
        if (bytes == -ENOENT) {
                drop_nlink(inode);
@@ -76,7 +79,14 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
        int err;
 
        err = efivar_entry_size(var, &datasize);
-       if (err)
+
+       /*
+        * efivarfs represents uncommitted variables with
+        * zero-length files. Reading them should return EOF.
+        */
+       if (err == -ENOENT)
+               return 0;
+       else if (err)
                return err;
 
        data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
index 0aabb344b02e6779ed410adff60a9c0e3ab4003e..5aae3d12d4004109cff811b77011486abbaed1b5 100644 (file)
@@ -209,7 +209,6 @@ typedef struct ext4_io_end {
        ssize_t                 size;           /* size of the extent */
        struct kiocb            *iocb;          /* iocb struct for AIO */
        int                     result;         /* error value for AIO */
-       atomic_t                count;          /* reference counter */
 } ext4_io_end_t;
 
 struct ext4_io_submit {
@@ -2651,14 +2650,11 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
+extern void ext4_add_complete_io(ext4_io_end_t *io_end);
 extern void ext4_exit_pageio(void);
 extern void ext4_ioend_shutdown(struct inode *);
+extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
-extern int ext4_put_io_end(ext4_io_end_t *io_end);
-extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
-extern void ext4_io_submit_init(struct ext4_io_submit *io,
-                               struct writeback_control *wbc);
 extern void ext4_end_io_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
 extern int ext4_bio_write_page(struct ext4_io_submit *io,
index 107936db244eddd5e7192b657ccef02841d3b617..bc0f1910b9cfa7dd5aa93b9c5d889c6fbacd037d 100644 (file)
@@ -3642,7 +3642,7 @@ int ext4_find_delalloc_range(struct inode *inode,
 {
        struct extent_status es;
 
-       ext4_es_find_delayed_extent(inode, lblk_start, &es);
+       ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
        if (es.es_len == 0)
                return 0; /* there is no delay extent in this tree */
        else if (es.es_lblk <= lblk_start &&
@@ -4608,9 +4608,10 @@ static int ext4_find_delayed_extent(struct inode *inode,
        struct extent_status es;
        ext4_lblk_t block, next_del;
 
-       ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
-
        if (newes->es_pblk == 0) {
+               ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
+                               newes->es_lblk + newes->es_len - 1, &es);
+
                /*
                 * No extent in extent-tree contains block @newes->es_pblk,
                 * then the block may stay in 1)a hole or 2)delayed-extent.
@@ -4630,7 +4631,7 @@ static int ext4_find_delayed_extent(struct inode *inode,
        }
 
        block = newes->es_lblk + newes->es_len;
-       ext4_es_find_delayed_extent(inode, block, &es);
+       ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
        if (es.es_len == 0)
                next_del = EXT_MAX_BLOCKS;
        else
index fe3337a85edeaecd6135333759173d8c6fbc0e78..e6941e622d310eb1ab47793b7c2e984609cebf0b 100644 (file)
@@ -232,14 +232,16 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
 }
 
 /*
- * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk
- * if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
+ * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
  *
  * @inode: the inode which owns delayed extents
  * @lblk: the offset where we start to search
+ * @end: the offset where we stop to search
  * @es: delayed extent that we found
  */
-void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+void ext4_es_find_delayed_extent_range(struct inode *inode,
+                                ext4_lblk_t lblk, ext4_lblk_t end,
                                 struct extent_status *es)
 {
        struct ext4_es_tree *tree = NULL;
@@ -247,7 +249,8 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
        struct rb_node *node;
 
        BUG_ON(es == NULL);
-       trace_ext4_es_find_delayed_extent_enter(inode, lblk);
+       BUG_ON(end < lblk);
+       trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
 
        read_lock(&EXT4_I(inode)->i_es_lock);
        tree = &EXT4_I(inode)->i_es_tree;
@@ -270,6 +273,10 @@ out:
        if (es1 && !ext4_es_is_delayed(es1)) {
                while ((node = rb_next(&es1->rb_node)) != NULL) {
                        es1 = rb_entry(node, struct extent_status, rb_node);
+                       if (es1->es_lblk > end) {
+                               es1 = NULL;
+                               break;
+                       }
                        if (ext4_es_is_delayed(es1))
                                break;
                }
@@ -285,7 +292,7 @@ out:
        read_unlock(&EXT4_I(inode)->i_es_lock);
 
        ext4_es_lru_add(inode);
-       trace_ext4_es_find_delayed_extent_exit(inode, es);
+       trace_ext4_es_find_delayed_extent_range_exit(inode, es);
 }
 
 static struct extent_status *
index d8e2d4dc311e62c99843fa16c7fc8d4a0798bddb..f740eb03b7079b755d3033225a08546ab450c117 100644 (file)
@@ -62,7 +62,8 @@ extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                                 unsigned long long status);
 extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                                 ext4_lblk_t len);
-extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+extern void ext4_es_find_delayed_extent_range(struct inode *inode,
+                                       ext4_lblk_t lblk, ext4_lblk_t end,
                                        struct extent_status *es);
 extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
                                 struct extent_status *es);
index 4959e29573b68d0ed2fec94c28015c52cb22afb9..b1b4d51b5d86b4e54c179ddce5f5b574238b3629 100644 (file)
@@ -465,7 +465,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
                 * If there is a delay extent at this offset,
                 * it will be as a data.
                 */
-               ext4_es_find_delayed_extent(inode, last, &es);
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = last << blkbits;
@@ -548,7 +548,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
                 * If there is a delay extent at this offset,
                 * we will skip this extent.
                 */
-               ext4_es_find_delayed_extent(inode, last, &es);
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = last << blkbits;
index 0723774bdfb5cd3ef27600eaeac7062c330414ac..d6382b89ecbde3077720ebc6a9bb56254883e2d7 100644 (file)
@@ -1488,10 +1488,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
        struct ext4_io_submit io_submit;
 
        BUG_ON(mpd->next_page <= mpd->first_page);
-       ext4_io_submit_init(&io_submit, mpd->wbc);
-       io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
-       if (!io_submit.io_end)
-               return -ENOMEM;
+       memset(&io_submit, 0, sizeof(io_submit));
        /*
         * We need to start from the first_page to the next_page - 1
         * to make sure we also write the mapped dirty buffer_heads.
@@ -1579,8 +1576,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                pagevec_release(&pvec);
        }
        ext4_io_submit(&io_submit);
-       /* Drop io_end reference we got from init */
-       ext4_put_io_end_defer(io_submit.io_end);
        return ret;
 }
 
@@ -2239,16 +2234,9 @@ static int ext4_writepage(struct page *page,
                 */
                return __ext4_journalled_writepage(page, len);
 
-       ext4_io_submit_init(&io_submit, wbc);
-       io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
-       if (!io_submit.io_end) {
-               redirty_page_for_writepage(wbc, page);
-               return -ENOMEM;
-       }
+       memset(&io_submit, 0, sizeof(io_submit));
        ret = ext4_bio_write_page(&io_submit, page, len, wbc);
        ext4_io_submit(&io_submit);
-       /* Drop io_end reference we got from init */
-       ext4_put_io_end_defer(io_submit.io_end);
        return ret;
 }
 
@@ -3079,13 +3067,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        struct inode *inode = file_inode(iocb->ki_filp);
         ext4_io_end_t *io_end = iocb->private;
 
-       /* if not async direct IO just return */
-       if (!io_end) {
-               inode_dio_done(inode);
-               if (is_async)
-                       aio_complete(iocb, ret, 0);
-               return;
-       }
+       /* if not async direct IO or dio with 0 bytes write, just return */
+       if (!io_end || !size)
+               goto out;
 
        ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3093,13 +3077,25 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  size);
 
        iocb->private = NULL;
+
+       /* if not aio dio with unwritten extents, just free io and return */
+       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+               ext4_free_io_end(io_end);
+out:
+               inode_dio_done(inode);
+               if (is_async)
+                       aio_complete(iocb, ret, 0);
+               return;
+       }
+
        io_end->offset = offset;
        io_end->size = size;
        if (is_async) {
                io_end->iocb = iocb;
                io_end->result = ret;
        }
-       ext4_put_io_end_defer(io_end);
+
+       ext4_add_complete_io(io_end);
 }
 
 /*
@@ -3133,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        get_block_t *get_block_func = NULL;
        int dio_flags = 0;
        loff_t final_size = offset + count;
-       ext4_io_end_t *io_end = NULL;
 
        /* Use the old path for reads and writes beyond i_size. */
        if (rw != WRITE || final_size > inode->i_size)
@@ -3172,16 +3167,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        iocb->private = NULL;
        ext4_inode_aio_set(inode, NULL);
        if (!is_sync_kiocb(iocb)) {
-               io_end = ext4_init_io_end(inode, GFP_NOFS);
+               ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
                if (!io_end) {
                        ret = -ENOMEM;
                        goto retake_lock;
                }
                io_end->flag |= EXT4_IO_END_DIRECT;
-               /*
-                * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
-                */
-               iocb->private = ext4_get_io_end(io_end);
+               iocb->private = io_end;
                /*
                 * we save the io structure for current async direct
                 * IO, so that later ext4_map_blocks() could flag the
@@ -3205,27 +3197,26 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                   NULL,
                                   dio_flags);
 
+       if (iocb->private)
+               ext4_inode_aio_set(inode, NULL);
        /*
-        * Put our reference to io_end. This can free the io_end structure e.g.
-        * in sync IO case or in case of error. It can even perform extent
-        * conversion if all bios we submitted finished before we got here.
-        * Note that in that case iocb->private can be already set to NULL
-        * here.
+        * The io_end structure takes a reference to the inode; that
+        * structure needs to be destroyed and the reference to the
+        * inode needs to be dropped when IO is complete, even for a
+        * zero-byte write or a failed request.
+        *
+        * In the successful AIO DIO case, the io_end structure will
+        * be destroyed and the reference to the inode will be dropped
+        * after the end_io call back function is called.
+        *
+        * In the case there is 0 byte write, or error case, since VFS
+        * direct IO won't invoke the end_io call back function, we
+        * need to free the end_io structure here.
         */
-       if (io_end) {
-               ext4_inode_aio_set(inode, NULL);
-               ext4_put_io_end(io_end);
-               /*
-                * In case of error or no write ext4_end_io_dio() was not
-                * called so we have to put iocb's reference.
-                */
-               if (ret <= 0 && ret != -EIOCBQUEUED) {
-                       WARN_ON(iocb->private != io_end);
-                       ext4_put_io_end(io_end);
-                       iocb->private = NULL;
-               }
-       }
-       if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+       if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+               ext4_free_io_end(iocb->private);
+               iocb->private = NULL;
+       } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
                                                EXT4_STATE_DIO_UNWRITTEN)) {
                int err;
                /*
index b1ed9e07434ba4a15d86d1a538e6b808917901d6..def84082a9a9b73deadc0d369c63a7ef981d8ea9 100644 (file)
@@ -2105,7 +2105,11 @@ repeat:
                group = ac->ac_g_ex.fe_group;
 
                for (i = 0; i < ngroups; group++, i++) {
-                       if (group == ngroups)
+                       /*
+                        * Artificially restricted ngroups for non-extent
+                        * files makes group > ngroups possible on first loop.
+                        */
+                       if (group >= ngroups)
                                group = 0;
 
                        /* This now checks without needing the buddy page */
index 19599bded62a834be3bc52dbf68cfb0414ec564d..4acf1f78881b6c4c61aa10377a62ea4368106593 100644 (file)
@@ -62,28 +62,15 @@ void ext4_ioend_shutdown(struct inode *inode)
                cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
 }
 
-static void ext4_release_io_end(ext4_io_end_t *io_end)
+void ext4_free_io_end(ext4_io_end_t *io)
 {
-       BUG_ON(!list_empty(&io_end->list));
-       BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
-
-       if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
-               wake_up_all(ext4_ioend_wq(io_end->inode));
-       if (io_end->flag & EXT4_IO_END_DIRECT)
-               inode_dio_done(io_end->inode);
-       if (io_end->iocb)
-               aio_complete(io_end->iocb, io_end->result, 0);
-       kmem_cache_free(io_end_cachep, io_end);
-}
-
-static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
-{
-       struct inode *inode = io_end->inode;
+       BUG_ON(!io);
+       BUG_ON(!list_empty(&io->list));
+       BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
 
-       io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
-       /* Wake up anyone waiting on unwritten extent conversion */
-       if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-               wake_up_all(ext4_ioend_wq(inode));
+       if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
+               wake_up_all(ext4_ioend_wq(io->inode));
+       kmem_cache_free(io_end_cachep, io);
 }
 
 /* check a range of space and convert unwritten extents to written. */
@@ -106,8 +93,13 @@ static int ext4_end_io(ext4_io_end_t *io)
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
-       ext4_clear_io_unwritten_flag(io);
-       ext4_release_io_end(io);
+       /* Wake up anyone waiting on unwritten extent conversion */
+       if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+               wake_up_all(ext4_ioend_wq(inode));
+       if (io->flag & EXT4_IO_END_DIRECT)
+               inode_dio_done(inode);
+       if (io->iocb)
+               aio_complete(io->iocb, io->result, 0);
        return ret;
 }
 
@@ -138,7 +130,7 @@ static void dump_completed_IO(struct inode *inode)
 }
 
 /* Add the io_end to per-inode completed end_io list. */
-static void ext4_add_complete_io(ext4_io_end_t *io_end)
+void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct workqueue_struct *wq;
@@ -175,6 +167,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
+               io->flag &= ~EXT4_IO_END_UNWRITTEN;
+               ext4_free_io_end(io);
        }
        return ret;
 }
@@ -206,43 +200,10 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
-               atomic_set(&io->count, 1);
        }
        return io;
 }
 
-void ext4_put_io_end_defer(ext4_io_end_t *io_end)
-{
-       if (atomic_dec_and_test(&io_end->count)) {
-               if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
-                       ext4_release_io_end(io_end);
-                       return;
-               }
-               ext4_add_complete_io(io_end);
-       }
-}
-
-int ext4_put_io_end(ext4_io_end_t *io_end)
-{
-       int err = 0;
-
-       if (atomic_dec_and_test(&io_end->count)) {
-               if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
-                       err = ext4_convert_unwritten_extents(io_end->inode,
-                                               io_end->offset, io_end->size);
-                       ext4_clear_io_unwritten_flag(io_end);
-               }
-               ext4_release_io_end(io_end);
-       }
-       return err;
-}
-
-ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
-{
-       atomic_inc(&io_end->count);
-       return io_end;
-}
-
 /*
  * Print an buffer I/O error compatible with the fs/buffer.c.  This
  * provides compatibility with dmesg scrapers that look for a specific
@@ -325,7 +286,12 @@ static void ext4_end_bio(struct bio *bio, int error)
                             bi_sector >> (inode->i_blkbits - 9));
        }
 
-       ext4_put_io_end_defer(io_end);
+       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+               ext4_free_io_end(io_end);
+               return;
+       }
+
+       ext4_add_complete_io(io_end);
 }
 
 void ext4_io_submit(struct ext4_io_submit *io)
@@ -339,37 +305,40 @@ void ext4_io_submit(struct ext4_io_submit *io)
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
-}
-
-void ext4_io_submit_init(struct ext4_io_submit *io,
-                        struct writeback_control *wbc)
-{
-       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
-       io->io_bio = NULL;
+       io->io_op = 0;
        io->io_end = NULL;
 }
 
-static int io_submit_init_bio(struct ext4_io_submit *io,
-                             struct buffer_head *bh)
+static int io_submit_init(struct ext4_io_submit *io,
+                         struct inode *inode,
+                         struct writeback_control *wbc,
+                         struct buffer_head *bh)
 {
+       ext4_io_end_t *io_end;
+       struct page *page = bh->b_page;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;
 
+       io_end = ext4_init_io_end(inode, GFP_NOFS);
+       if (!io_end)
+               return -ENOMEM;
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
+       bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;
-       bio->bi_private = ext4_get_io_end(io->io_end);
-       if (!io->io_end->size)
-               io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT)
-                                    + bh_offset(bh);
+
+       io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
        io->io_bio = bio;
+       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
        io->io_next_block = bh->b_blocknr;
        return 0;
 }
 
 static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
+                           struct writeback_control *wbc,
                            struct buffer_head *bh)
 {
        ext4_io_end_t *io_end;
@@ -380,18 +349,18 @@ submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
-               ret = io_submit_init_bio(io, bh);
+               ret = io_submit_init(io, inode, wbc, bh);
                if (ret)
                        return ret;
        }
-       ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
-       if (ret != bh->b_size)
-               goto submit_and_retry;
        io_end = io->io_end;
        if (test_clear_buffer_uninit(bh))
                ext4_set_io_unwritten_flag(inode, io_end);
-       io_end->size += bh->b_size;
+       io->io_end->size += bh->b_size;
        io->io_next_block++;
+       ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+       if (ret != bh->b_size)
+               goto submit_and_retry;
        return 0;
 }
 
@@ -463,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        do {
                if (!buffer_async_write(bh))
                        continue;
-               ret = io_submit_add_bh(io, inode, bh);
+               ret = io_submit_add_bh(io, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
index dfce656ddb333a6e7720c8ab0e32984ad82f11f3..5d4513cb1b3c03604792f145125b21fb338a3101 100644 (file)
@@ -1229,6 +1229,19 @@ static int fat_read_root(struct inode *inode)
        return 0;
 }
 
+static unsigned long calc_fat_clusters(struct super_block *sb)
+{
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+       /* Divide first to avoid overflow */
+       if (sbi->fat_bits != 12) {
+               unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
+               return ent_per_sec * sbi->fat_length;
+       }
+
+       return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+}
+
 /*
  * Read the super block of an MS-DOS FS.
  */
@@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
 
        /* check that FAT table does not overflow */
-       fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+       fat_clusters = calc_fat_clusters(sb);
        total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
        if (total_clusters > MAX_FAT(sb)) {
                if (!silent)
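
calc_fat_clusters() divides before multiplying for FAT16/FAT32, where blocksize * 8 is an exact multiple of fat_bits, because fat_length * blocksize * 8 can overflow a 32-bit unsigned long on large volumes; FAT12 keeps the original order since its entry size does not divide evenly. A small standalone demonstration with purely illustrative geometry values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative FAT32 geometry, evaluated in 32-bit arithmetic */
	uint32_t fat_length = 0x100000;		/* sectors per FAT */
	uint32_t blocksize  = 512;
	uint32_t fat_bits   = 32;

	/* old order: fat_length * blocksize * 8 wraps around in 32 bits */
	uint32_t naive = fat_length * blocksize * 8 / fat_bits;

	/* new order: divide first, then multiply */
	uint32_t ent_per_sec = blocksize * 8 / fat_bits;
	uint32_t safe = ent_per_sec * fat_length;

	printf("naive=%u safe=%u\n", (unsigned)naive, (unsigned)safe);
	/* prints: naive=0 safe=134217728 */
	return 0;
}
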
index cd4d87a82951f6169cd4f5fd8e24a61c45c16b8d..485dc0eddd6707839120324ab552d94ef5ed5ab9 100644 (file)
@@ -306,17 +306,18 @@ void fput(struct file *file)
 {
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
+               unsigned long flags;
+
                file_sb_list_del(file);
-               if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&delayed_fput_lock, flags);
-                       list_add(&file->f_u.fu_list, &delayed_fput_list);
-                       schedule_work(&delayed_fput_work);
-                       spin_unlock_irqrestore(&delayed_fput_lock, flags);
-                       return;
+               if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+                       init_task_work(&file->f_u.fu_rcuhead, ____fput);
+                       if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
+                               return;
                }
-               init_task_work(&file->f_u.fu_rcuhead, ____fput);
-               task_work_add(task, &file->f_u.fu_rcuhead, true);
+               spin_lock_irqsave(&delayed_fput_lock, flags);
+               list_add(&file->f_u.fu_list, &delayed_fput_list);
+               schedule_work(&delayed_fput_work);
+               spin_unlock_irqrestore(&delayed_fput_lock, flags);
        }
 }
 
index 254df56b847b96d5104e62818c0f1c6f4803673c..f3f783dc4f7509096235c7f8f6ed468fe51ddded 100644 (file)
@@ -180,6 +180,8 @@ u64 fuse_get_attr_version(struct fuse_conn *fc)
 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
 {
        struct inode *inode;
+       struct dentry *parent;
+       struct fuse_conn *fc;
 
        inode = ACCESS_ONCE(entry->d_inode);
        if (inode && is_bad_inode(inode))
@@ -187,10 +189,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
        else if (fuse_dentry_time(entry) < get_jiffies_64()) {
                int err;
                struct fuse_entry_out outarg;
-               struct fuse_conn *fc;
                struct fuse_req *req;
                struct fuse_forget_link *forget;
-               struct dentry *parent;
                u64 attr_version;
 
                /* For negative dentries, always do a fresh lookup */
@@ -241,8 +241,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
                                       entry_attr_timeout(&outarg),
                                       attr_version);
                fuse_change_entry_timeout(entry, &outarg);
+       } else if (inode) {
+               fc = get_fuse_conn(inode);
+               if (fc->readdirplus_auto) {
+                       parent = dget_parent(entry);
+                       fuse_advise_use_readdirplus(parent->d_inode);
+                       dput(parent);
+               }
        }
-       fuse_advise_use_readdirplus(inode);
        return 1;
 }
 
index d1c9b85b3f58bfbbc10919b6ee50e7b4ead9bb2f..e570081f9f76be0be1bc05deb9b260101d018b51 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/compat.h>
 #include <linux/swap.h>
 #include <linux/aio.h>
+#include <linux/falloc.h>
 
 static const struct file_operations fuse_direct_io_file_operations;
 
@@ -1278,7 +1279,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 
        iov_iter_init(&ii, iov, nr_segs, count, 0);
 
-       req = fuse_get_req(fc, fuse_iter_npages(&ii));
+       if (io->async)
+               req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+       else
+               req = fuse_get_req(fc, fuse_iter_npages(&ii));
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1314,7 +1318,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                        break;
                if (count) {
                        fuse_put_request(fc, req);
-                       req = fuse_get_req(fc, fuse_iter_npages(&ii));
+                       if (io->async)
+                               req = fuse_get_req_for_background(fc,
+                                       fuse_iter_npages(&ii));
+                       else
+                               req = fuse_get_req(fc, fuse_iter_npages(&ii));
                        if (IS_ERR(req))
                                break;
                }
@@ -2365,6 +2373,11 @@ static void fuse_do_truncate(struct file *file)
        fuse_do_setattr(inode, &attr, file);
 }
 
+static inline loff_t fuse_round_up(loff_t off)
+{
+       return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
+}
+
 static ssize_t
 fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
@@ -2372,6 +2385,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        ssize_t ret = 0;
        struct file *file = iocb->ki_filp;
        struct fuse_file *ff = file->private_data;
+       bool async_dio = ff->fc->async_dio;
        loff_t pos = 0;
        struct inode *inode;
        loff_t i_size;
@@ -2383,10 +2397,10 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        i_size = i_size_read(inode);
 
        /* optimization for short read */
-       if (rw != WRITE && offset + count > i_size) {
+       if (async_dio && rw != WRITE && offset + count > i_size) {
                if (offset >= i_size)
                        return 0;
-               count = i_size - offset;
+               count = min_t(loff_t, count, fuse_round_up(i_size - offset));
        }
 
        io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2404,7 +2418,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         * By default, we want to optimize all I/Os with async request
         * submission to the client filesystem if supported.
         */
-       io->async = ff->fc->async_dio;
+       io->async = async_dio;
        io->iocb = iocb;
 
        /*
@@ -2412,7 +2426,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         * to wait on real async I/O requests, so we must submit this request
         * synchronously.
         */
-       if (!is_sync_kiocb(iocb) && (offset + count > i_size))
+       if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
                io->async = false;
 
        if (rw == WRITE)
@@ -2424,7 +2438,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
                /* we have a non-extending, async request, so return */
-               if (ret > 0 && !is_sync_kiocb(iocb))
+               if (!is_sync_kiocb(iocb))
                        return -EIOCBQUEUED;
 
                ret = wait_on_sync_kiocb(iocb);
@@ -2446,6 +2460,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
                                loff_t length)
 {
        struct fuse_file *ff = file->private_data;
+       struct inode *inode = file->f_inode;
        struct fuse_conn *fc = ff->fc;
        struct fuse_req *req;
        struct fuse_fallocate_in inarg = {
@@ -2459,9 +2474,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        if (fc->no_fallocate)
                return -EOPNOTSUPP;
 
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               mutex_lock(&inode->i_mutex);
+               fuse_set_nowrite(inode);
+       }
+
        req = fuse_get_req_nopages(fc);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out;
+       }
 
        req->in.h.opcode = FUSE_FALLOCATE;
        req->in.h.nodeid = ff->nodeid;
@@ -2476,6 +2498,24 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        }
        fuse_put_request(fc, req);
 
+       if (err)
+               goto out;
+
+       /* we could have extended the file */
+       if (!(mode & FALLOC_FL_KEEP_SIZE))
+               fuse_write_update_size(inode, offset + length);
+
+       if (mode & FALLOC_FL_PUNCH_HOLE)
+               truncate_pagecache_range(inode, offset, offset + length - 1);
+
+       fuse_invalidate_attr(inode);
+
+out:
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               fuse_release_nowrite(inode);
+               mutex_unlock(&inode->i_mutex);
+       }
+
        return err;
 }
 
index 6201f81e4d3a54bdc2d1d4a145253d7ab199baa9..9a0cdde14a088c43e12e5d5fc4ba84257ca98b46 100644 (file)
@@ -867,10 +867,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->dont_mask = 1;
                        if (arg->flags & FUSE_AUTO_INVAL_DATA)
                                fc->auto_inval_data = 1;
-                       if (arg->flags & FUSE_DO_READDIRPLUS)
+                       if (arg->flags & FUSE_DO_READDIRPLUS) {
                                fc->do_readdirplus = 1;
-                       if (arg->flags & FUSE_READDIRPLUS_AUTO)
-                               fc->readdirplus_auto = 1;
+                               if (arg->flags & FUSE_READDIRPLUS_AUTO)
+                                       fc->readdirplus_auto = 1;
+                       }
                        if (arg->flags & FUSE_ASYNC_DIO)
                                fc->async_dio = 1;
                } else {
index eb08c9e43c2afb13947ca557f1a62da4626c5071..5a376ab81feb9021620cdb13b0cd1642059ead43 100644 (file)
@@ -26,7 +26,7 @@ config GFS2_FS
 config GFS2_FS_LOCKING_DLM
        bool "GFS2 DLM locking"
        depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
-               HOTPLUG && DLM && CONFIGFS_FS && SYSFS
+               HOTPLUG && CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS)
        help
          Multiple node locking module for GFS2
 
index 1dc9a13ce6bb2f160cdd98a857ac48496c64dffe..93b5809c20bb347a9de3e5f0d46949c3dcf78dc5 100644 (file)
@@ -1286,17 +1286,26 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
        if (ret)
                return ret;
 
+       ret = get_write_access(inode);
+       if (ret)
+               return ret;
+
        inode_dio_wait(inode);
 
        ret = gfs2_rs_alloc(GFS2_I(inode));
        if (ret)
-               return ret;
+               goto out;
 
        oldsize = inode->i_size;
-       if (newsize >= oldsize)
-               return do_grow(inode, newsize);
+       if (newsize >= oldsize) {
+               ret = do_grow(inode, newsize);
+               goto out;
+       }
 
-       return do_shrink(inode, oldsize, newsize);
+       ret = do_shrink(inode, oldsize, newsize);
+out:
+       put_write_access(inode);
+       return ret;
 }
 
 int gfs2_truncatei_resume(struct gfs2_inode *ip)
index c3e82bd23179533e83dc5bf57186be025cdc2e9e..b631c904346084b5fb7b7d96985e724e7b4173a3 100644 (file)
@@ -354,22 +354,31 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
                return ERR_PTR(-EIO);
        }
 
-       hc = kmalloc(hsize, GFP_NOFS);
-       ret = -ENOMEM;
+       hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
+       if (hc == NULL)
+               hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
+
        if (hc == NULL)
                return ERR_PTR(-ENOMEM);
 
        ret = gfs2_dir_read_data(ip, hc, hsize);
        if (ret < 0) {
-               kfree(hc);
+               if (is_vmalloc_addr(hc))
+                       vfree(hc);
+               else
+                       kfree(hc);
                return ERR_PTR(ret);
        }
 
        spin_lock(&inode->i_lock);
-       if (ip->i_hash_cache)
-               kfree(hc);
-       else
+       if (ip->i_hash_cache) {
+               if (is_vmalloc_addr(hc))
+                       vfree(hc);
+               else
+                       kfree(hc);
+       } else {
                ip->i_hash_cache = hc;
+       }
        spin_unlock(&inode->i_lock);
 
        return ip->i_hash_cache;
@@ -385,7 +394,10 @@ void gfs2_dir_hash_inval(struct gfs2_inode *ip)
 {
        __be64 *hc = ip->i_hash_cache;
        ip->i_hash_cache = NULL;
-       kfree(hc);
+       if (is_vmalloc_addr(hc))
+               vfree(hc);
+       else
+               kfree(hc);
 }
 
 static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
@@ -1113,7 +1125,10 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        if (IS_ERR(hc))
                return PTR_ERR(hc);
 
-       h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS);
+       h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
+       if (hc2 == NULL)
+               hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
+
        if (!hc2)
                return -ENOMEM;
 
@@ -1145,7 +1160,10 @@ fail:
        gfs2_dinode_out(dip, dibh->b_data);
        brelse(dibh);
 out_kfree:
-       kfree(hc2);
+       if (is_vmalloc_addr(hc2))
+               vfree(hc2);
+       else
+               kfree(hc2);
        return error;
 }
 
@@ -1846,6 +1864,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
        ht = kzalloc(size, GFP_NOFS);
+       if (ht == NULL)
+               ht = vzalloc(size);
        if (!ht)
                return -ENOMEM;
 
@@ -1933,7 +1953,10 @@ out_rlist:
        gfs2_rlist_free(&rlist);
        gfs2_quota_unhold(dip);
 out:
-       kfree(ht);
+       if (is_vmalloc_addr(ht))
+               vfree(ht);
+       else
+               kfree(ht);
        return error;
 }
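
The directory hash-table hunks above repeat one pattern several times: try kmalloc() with __GFP_NOWARN, fall back to __vmalloc() when the table is too large for the slab allocator, and free with whichever of kfree()/vfree() matches the allocation. The patch open-codes it at each site; a hypothetical pair of helpers (not part of this commit) factoring the pattern out could look like this:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helpers, not in this patch: allocate a possibly large
 * hash table without the kmalloc failure warning, falling back to
 * vmalloc space, and free it with the matching call. */
static void *gfs2_hash_alloc(size_t size)
{
	void *p = kmalloc(size, GFP_NOFS | __GFP_NOWARN);

	if (p == NULL)
		p = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
	return p;
}

static void gfs2_hash_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
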
 
index acd16764b133aa896bac5bea77a9bb481e07c74c..ad0dc38d87ab74dd7695a74b683a2baf7d3620d3 100644 (file)
@@ -402,16 +402,20 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Update file times before taking page lock */
        file_update_time(vma->vm_file);
 
+       ret = get_write_access(inode);
+       if (ret)
+               goto out;
+
        ret = gfs2_rs_alloc(ip);
        if (ret)
-               return ret;
+               goto out_write_access;
 
        gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
-               goto out;
+               goto out_uninit;
 
        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -480,12 +484,15 @@ out_quota_unlock:
        gfs2_quota_unlock(ip);
 out_unlock:
        gfs2_glock_dq(&gh);
-out:
+out_uninit:
        gfs2_holder_uninit(&gh);
        if (ret == 0) {
                set_page_dirty(page);
                wait_for_stable_page(page);
        }
+out_write_access:
+       put_write_access(inode);
+out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(ret);
 }
@@ -594,10 +601,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
        kfree(file->private_data);
        file->private_data = NULL;
 
-       if ((file->f_mode & FMODE_WRITE) &&
-           (atomic_read(&inode->i_writecount) == 1))
-               gfs2_rs_delete(ip);
+       if (!(file->f_mode & FMODE_WRITE))
+               return 0;
 
+       gfs2_rs_delete(ip);
        return 0;
 }
 
index 8833a4f264e3a2e1aefc79af532d0d83920185b5..62b484e4a9e4e46ac905e9a24e8afdce330225d2 100644 (file)
@@ -189,6 +189,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
        return inode;
 
 fail_refresh:
+       ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
        ip->i_iopen_gh.gh_gl->gl_object = NULL;
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_iopen:
index c5fa758fd8446e1938036be9cdedaf75e2bc552b..6c33d7b6e0c4e26b6b02d74d8d5e090f26c820e9 100644 (file)
@@ -212,7 +212,7 @@ static void gfs2_end_log_write(struct bio *bio, int error)
                fs_err(sdp, "Error %d writing to log\n", error);
        }
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment_all(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, error);
@@ -419,7 +419,9 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
-               page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num);
+               page = gfs2_get_log_desc(sdp,
+                                        is_databuf ? GFS2_LOG_DESC_JDATA :
+                                        GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);
index c7c840e916f82f333861ac7998ccf53710cc34d8..c253b13722e8a8e2fc08b9708da11f74fe3b270a 100644 (file)
@@ -121,7 +121,7 @@ static u64 qd2index(struct gfs2_quota_data *qd)
 {
        struct kqid qid = qd->qd_id;
        return (2 * (u64)from_kqid(&init_user_ns, qid)) +
-               (qid.type == USRQUOTA) ? 0 : 1;
+               ((qid.type == USRQUOTA) ? 0 : 1);
 }
 
 static u64 qd2offset(struct gfs2_quota_data *qd)
@@ -721,7 +721,7 @@ get_a_page:
                        goto unlock_out;
        }
 
-       gfs2_trans_add_meta(ip->i_gl, bh);
+       gfs2_trans_add_data(ip->i_gl, bh);
 
        kaddr = kmap_atomic(page);
        if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
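
The qd2index() change above is a pure operator-precedence fix: '+' binds more tightly than '?:', so without the added parentheses the whole sum became the ternary's condition and every quota id collapsed to an index of 0 or 1. A standalone demonstration with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long long id = 1000;	/* illustrative quota id */
	int is_user = 0;		/* pretend this is a group quota */

	/* old expression: parsed as ((2 * id) + is_user) ? 0 : 1 */
	unsigned long long buggy = 2 * id + is_user ? 0 : 1;

	/* fixed expression: even index for user quotas, odd for group */
	unsigned long long fixed = 2 * id + (is_user ? 0 : 1);

	printf("buggy=%llu fixed=%llu\n", buggy, fixed);	/* buggy=0 fixed=2001 */
	return 0;
}
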
index 0c5a575b513ea2e28e4f4a4065de97db365fe576..9809156e3d044e9ca4c39dd9b58c69105f282ab0 100644 (file)
@@ -638,8 +638,10 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
  */
 void gfs2_rs_delete(struct gfs2_inode *ip)
 {
+       struct inode *inode = &ip->i_inode;
+
        down_write(&ip->i_rw_mutex);
-       if (ip->i_res) {
+       if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
                gfs2_rs_deltree(ip->i_res);
                BUG_ON(ip->i_res->rs_free);
                kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -1401,9 +1403,14 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
        u32 extlen;
        u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
        int ret;
+       struct inode *inode = &ip->i_inode;
 
-       extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
-       extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+       if (S_ISDIR(inode->i_mode))
+               extlen = 1;
+       else {
+               extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+               extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+       }
        if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
                return;
 
index 917c8e1eb4ae5e1889e290c6fda4a73bcb869bbe..e5639dec66c49dc7361ff0ed79828301031b675d 100644 (file)
@@ -1444,6 +1444,7 @@ static void gfs2_evict_inode(struct inode *inode)
        /* Must not read inode block until block type has been verified */
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
        if (unlikely(error)) {
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
        }
@@ -1514,8 +1515,10 @@ out_unlock:
        if (gfs2_rs_active(ip->i_res))
                gfs2_rs_deltree(ip->i_res);
 
-       if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
+       if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq(&ip->i_iopen_gh);
+       }
        gfs2_holder_uninit(&ip->i_iopen_gh);
        gfs2_glock_dq_uninit(&gh);
        if (error && error != GLR_TRYFAILED && error != -EROFS)
@@ -1534,6 +1537,7 @@ out:
        ip->i_gl = NULL;
        if (ip->i_iopen_gh.gh_gl) {
                ip->i_iopen_gh.gh_gl->gl_object = NULL;
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
        }
 }
index f3b1a15ccd5930e72896bdbaf3213460c3bb9f34..d3fa6bd9503e762c861debdd4fe64bef546bb78f 100644 (file)
@@ -415,7 +415,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
        spin_lock(&tree->hash_lock);
        node = hfs_bnode_findhash(tree, num);
        spin_unlock(&tree->hash_lock);
-       BUG_ON(node);
+       if (node) {
+               pr_crit("new node %u already hashed?\n", num);
+               WARN_ON(1);
+               return node;
+       }
        node = __hfs_bnode_create(tree, num);
        if (!node)
                return ERR_PTR(-ENOMEM);
index 546f6d39713aa97be0e3d03f8dc115317a481ac7..834ac13c04b7976442a72b8c7ee20fa3d8b443b2 100644 (file)
@@ -33,25 +33,27 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
        if (whence == SEEK_DATA || whence == SEEK_HOLE)
                return -EINVAL;
 
+       mutex_lock(&i->i_mutex);
        hpfs_lock(s);
 
        /*printk("dir lseek\n");*/
        if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
-       mutex_lock(&i->i_mutex);
        pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
        while (pos != new_off) {
                if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
                else goto fail;
                if (pos == 12) goto fail;
        }
-       mutex_unlock(&i->i_mutex);
+       hpfs_add_pos(i, &filp->f_pos);
 ok:
+       filp->f_pos = new_off;
        hpfs_unlock(s);
-       return filp->f_pos = new_off;
-fail:
        mutex_unlock(&i->i_mutex);
+       return new_off;
+fail:
        /*printk("illegal lseek: %016llx\n", new_off);*/
        hpfs_unlock(s);
+       mutex_unlock(&i->i_mutex);
        return -ESPIPE;
 }
 
index 3027f4dbbab5195990a13d763055f229fa06213a..e4ba5fe4c3b52fad13ffcfd18fb2ad962c53c2f5 100644 (file)
@@ -109,10 +109,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
 {
        struct inode *inode = mapping->host;
 
+       hpfs_lock(inode->i_sb);
+
        if (to > inode->i_size) {
                truncate_pagecache(inode, to, inode->i_size);
                hpfs_truncate(inode);
        }
+
+       hpfs_unlock(inode->i_sb);
 }
 
 static int hpfs_write_begin(struct file *file, struct address_space *mapping,
index c57499dca89c5a3910bcefc5af951179aa693f24..360d27c488873825fed5c04f8bb2320a51a39d62 100644 (file)
@@ -2009,7 +2009,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
-       submit_bio(READ_SYNC, bio);
+       /*check if journaling to disk has been disabled*/
+       if (log->no_integrity) {
+               bio->bi_size = 0;
+               lbmIODone(bio, 0);
+       } else {
+               submit_bio(READ_SYNC, bio);
+       }
 
        wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
 
index 2003e830ed1c0d890f1fe0944db8dff04dab8ee6..788e0a9c1fb09cfb6d80ee65e1747310c7c840d0 100644 (file)
@@ -611,11 +611,28 @@ static int jfs_freeze(struct super_block *sb)
 {
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        struct jfs_log *log = sbi->log;
+       int rc = 0;
 
        if (!(sb->s_flags & MS_RDONLY)) {
                txQuiesce(sb);
-               lmLogShutdown(log);
-               updateSuper(sb, FM_CLEAN);
+               rc = lmLogShutdown(log);
+               if (rc) {
+                       jfs_error(sb, "jfs_freeze: lmLogShutdown failed");
+
+                       /* let operations fail rather than hang */
+                       txResume(sb);
+
+                       return rc;
+               }
+               rc = updateSuper(sb, FM_CLEAN);
+               if (rc) {
+                       jfs_err("jfs_freeze: updateSuper failed\n");
+                       /*
+                        * Don't fail here. Everything succeeded except
+                        * marking the superblock clean, so there's really
+                        * no harm in leaving it frozen for now.
+                        */
+               }
        }
        return 0;
 }
@@ -627,13 +644,18 @@ static int jfs_unfreeze(struct super_block *sb)
        int rc = 0;
 
        if (!(sb->s_flags & MS_RDONLY)) {
-               updateSuper(sb, FM_MOUNT);
-               if ((rc = lmLogInit(log)))
-                       jfs_err("jfs_unlock failed with return code %d", rc);
-               else
-                       txResume(sb);
+               rc = updateSuper(sb, FM_MOUNT);
+               if (rc) {
+                       jfs_error(sb, "jfs_unfreeze: updateSuper failed");
+                       goto out;
+               }
+               rc = lmLogInit(log);
+               if (rc)
+                       jfs_error(sb, "jfs_unfreeze: lmLogInit failed");
+out:
+               txResume(sb);
        }
-       return 0;
+       return rc;
 }
 
 static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
index 85e40d1c0a8fd64b358447ee09b26f906102ee1d..9ed9361223c08f30ebbcca48e165588adeb31378 100644 (file)
@@ -1976,7 +1976,7 @@ static int path_lookupat(int dfd, const char *name,
                err = complete_walk(nd);
 
        if (!err && nd->flags & LOOKUP_DIRECTORY) {
-               if (!nd->inode->i_op->lookup) {
+               if (!can_lookup(nd->inode)) {
                        path_put(&nd->path);
                        err = -ENOTDIR;
                }
@@ -2850,7 +2850,7 @@ finish_lookup:
        if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
                goto out;
        error = -ENOTDIR;
-       if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
+       if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode))
                goto out;
        audit_inode(name, nd->path.dentry, 0);
 finish_open:
index 8163260936561f07834f90beb2055dcd9300e1a6..6792ce11f2bfc19757c0a5ac0061073a5b42a900 100644 (file)
@@ -1029,15 +1029,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
        DPRINTK("ncp_rmdir: removing %s/%s\n",
                dentry->d_parent->d_name.name, dentry->d_name.name);
 
-       /*
-        * fail with EBUSY if there are still references to this
-        * directory.
-        */
-       dentry_unhash(dentry);
-       error = -EBUSY;
-       if (!d_unhashed(dentry))
-               goto out;
-
        len = sizeof(__name);
        error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
                           dentry->d_name.len, !ncp_preserve_case(dir));
index a13d26ede254357d46bc1f24c7c404555048987f..0bc27684ebfa338d0b77c295427c31472ec66049 100644 (file)
@@ -414,7 +414,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 
        spin_lock(&tbl->slot_tbl_lock);
        /* state manager is resetting the session */
-       if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+       if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                spin_unlock(&tbl->slot_tbl_lock);
                status = htonl(NFS4ERR_DELAY);
                /* Return NFS4ERR_BADSESSION if we're draining the session
index 59461c957d9d7ed7204e09f9fad1aa0b154d8486..a35582c9d4440f8fe907192427b5c4f9a9a3b061 100644 (file)
@@ -763,7 +763,7 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
         * A single slot, so highest used slotid is either 0 or -1
         */
        tbl->highest_used_slotid = NFS4_NO_SLOT;
-       nfs4_session_drain_complete(session, tbl);
+       nfs4_slot_tbl_drain_complete(tbl);
        spin_unlock(&tbl->slot_tbl_lock);
 }
 
index 947b0c908aa908c643140dd7087b0927753c6d49..4cbad5d6b276f8c481984c166869e2679c2e241e 100644 (file)
@@ -203,7 +203,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
        error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
        if (error == -EINVAL)
-               error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL);
+               error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
        if (error < 0)
                goto error;
 
index 8fbc100541154cbd31253dc261eb5eaafbe71e29..d7ba5616989c49fe52d396d41187a663744598f1 100644 (file)
@@ -572,7 +572,7 @@ int nfs41_setup_sequence(struct nfs4_session *session,
        task->tk_timeout = 0;
 
        spin_lock(&tbl->slot_tbl_lock);
-       if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
+       if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
            !args->sa_privileged) {
                /* The state manager will wait until the slot table is empty */
                dprintk("%s session is draining\n", __func__);
@@ -1078,7 +1078,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
        struct nfs4_state *state = opendata->state;
        struct nfs_inode *nfsi = NFS_I(state->inode);
        struct nfs_delegation *delegation;
-       int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
+       int open_mode = opendata->o_arg.open_flags;
        fmode_t fmode = opendata->o_arg.fmode;
        nfs4_stateid stateid;
        int ret = -EAGAIN;
index ebda5f4a031b74d2c890d4c1e6cf932d869e8f2b..c4e225e4a9afc382b1eb689314ec2e7875cc1a45 100644 (file)
@@ -73,7 +73,7 @@ void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
                        tbl->highest_used_slotid = new_max;
                else {
                        tbl->highest_used_slotid = NFS4_NO_SLOT;
-                       nfs4_session_drain_complete(tbl->session, tbl);
+                       nfs4_slot_tbl_drain_complete(tbl);
                }
        }
        dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
@@ -226,7 +226,7 @@ static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
        struct nfs4_slot *slot = pslot;
        struct nfs4_slot_table *tbl = slot->table;
 
-       if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+       if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
                return false;
        slot->generation = tbl->generation;
        args->sa_slot = slot;
index 6f3cb39386d4e400d509e6526924fa0b5233ed74..ff7d9f0f8a65179fbf9bc795c4b1cdbf1ee56bf1 100644 (file)
@@ -25,6 +25,10 @@ struct nfs4_slot {
 };
 
 /* Sessions */
+enum nfs4_slot_tbl_state {
+       NFS4_SLOT_TBL_DRAINING,
+};
+
 #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
 struct nfs4_slot_table {
        struct nfs4_session *session;           /* Parent session */
@@ -43,6 +47,7 @@ struct nfs4_slot_table {
        unsigned long   generation;             /* Generation counter for
                                                   target_highest_slotid */
        struct completion complete;
+       unsigned long   slot_tbl_state;
 };
 
 /*
@@ -68,7 +73,6 @@ struct nfs4_session {
 
 enum nfs4_session_state {
        NFS4_SESSION_INITING,
-       NFS4_SESSION_DRAINING,
 };
 
 #if defined(CONFIG_NFS_V4_1)
@@ -88,12 +92,11 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
 extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
 
-extern void nfs4_session_drain_complete(struct nfs4_session *session,
-               struct nfs4_slot_table *tbl);
+extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
 
-static inline bool nfs4_session_draining(struct nfs4_session *session)
+static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)
 {
-       return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
+       return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 }
 
 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
index 300d17d85c0e03397d352746fc6fc775b39941e1..1fab140764c42756867f064a81b61903b8858a91 100644 (file)
@@ -241,7 +241,7 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
        if (ses == NULL)
                return;
        tbl = &ses->fc_slot_table;
-       if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+       if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                spin_lock(&tbl->slot_tbl_lock);
                nfs41_wake_slot_table(tbl);
                spin_unlock(&tbl->slot_tbl_lock);
@@ -251,15 +251,15 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
 /*
  * Signal state manager thread if session fore channel is drained
  */
-void nfs4_session_drain_complete(struct nfs4_session *session,
-               struct nfs4_slot_table *tbl)
+void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
 {
-       if (nfs4_session_draining(session))
+       if (nfs4_slot_tbl_draining(tbl))
                complete(&tbl->complete);
 }
 
-static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
+static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
 {
+       set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
        spin_lock(&tbl->slot_tbl_lock);
        if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
                INIT_COMPLETION(tbl->complete);
@@ -275,13 +275,12 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
        struct nfs4_session *ses = clp->cl_session;
        int ret = 0;
 
-       set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
        /* back channel */
-       ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+       ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
        if (ret)
                return ret;
        /* fore channel */
-       return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+       return nfs4_drain_slot_tbl(&ses->fc_slot_table);
 }
 
 static void nfs41_finish_session_reset(struct nfs_client *clp)
index a366107a7331ad36864ba81b8b14ba940756ac70..2d7525fbcf250225981ab521da638fc8f2a204d5 100644 (file)
@@ -1942,6 +1942,7 @@ static int nfs23_validate_mount_data(void *options,
                args->namlen            = data->namlen;
                args->bsize             = data->bsize;
 
+               args->auth_flavors[0] = RPC_AUTH_UNIX;
                if (data->flags & NFS_MOUNT_SECFLAVOUR)
                        args->auth_flavors[0] = data->pseudoflavor;
                if (!args->nfs_server.hostname)
@@ -2637,6 +2638,7 @@ static int nfs4_validate_mount_data(void *options,
                        goto out_no_address;
                args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
 
+               args->auth_flavors[0] = RPC_AUTH_UNIX;
                if (data->auth_flavourlen) {
                        if (data->auth_flavourlen > 1)
                                goto out_inval_auth;
index 689fb608648e9a80db3c4e643feb3fe00ff1cfa4..bccfec8343c5ee34925cea97b8fc4006f8265084 100644 (file)
@@ -219,13 +219,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int nilfs_set_page_dirty(struct page *page)
 {
-       int ret = __set_page_dirty_buffers(page);
+       int ret = __set_page_dirty_nobuffers(page);
 
-       if (ret) {
+       if (page_has_buffers(page)) {
                struct inode *inode = page->mapping->host;
-               unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
+               unsigned nr_dirty = 0;
+               struct buffer_head *bh, *head;
 
-               nilfs_set_file_dirty(inode, nr_dirty);
+               /*
+                * This page is locked by callers, and no other thread
+                * concurrently marks its buffers dirty since they are
+                * only dirtied through routines in fs/buffer.c in
+                * which call sites of mark_buffer_dirty are protected
+                * by page lock.
+                */
+               bh = head = page_buffers(page);
+               do {
+                       /* Do not mark hole blocks dirty */
+                       if (buffer_dirty(bh) || !buffer_mapped(bh))
+                               continue;
+
+                       set_buffer_dirty(bh);
+                       nr_dirty++;
+               } while (bh = bh->b_this_page, bh != head);
+
+               if (nr_dirty)
+                       nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
 }
index b3fdd1a323d6be070f2c85cdd4387fc2eb1547af..e68588e6b1e8eeb2f491f7b10b1c5a84d8ccb910 100644 (file)
@@ -1408,6 +1408,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                                     mres->lockname_len, mres->lockname);
                                ret = -EFAULT;
                                spin_unlock(&res->spinlock);
+                               dlm_lockres_put(res);
                                goto leave;
                        }
                        res->state |= DLM_LOCK_RES_MIGRATING;
index 1c39efb71bab6d2c4926962c0a642d291e2b360b..2487116d0d3312981834aa3667fd708dc7aa05ab 100644 (file)
@@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                                                 &hole_size, &rec, &is_last);
                if (ret) {
                        mlog_errno(ret);
-                       goto out;
+                       goto out_unlock;
                }
 
                if (rec.e_blkno == 0ULL) {
index 8a7509f9e6f5e0f49438876228a8e887ac2cdeb1..ff54014a24ecd58511c37a16b33f5e27a1e3e6dd 100644 (file)
@@ -2288,7 +2288,7 @@ relock:
                ret = ocfs2_inode_lock(inode, NULL, 1);
                if (ret < 0) {
                        mlog_errno(ret);
-                       goto out_sems;
+                       goto out;
                }
 
                ocfs2_inode_unlock(inode, 1);
index 04ee1b57c243f01a54e59f4405e282e7c633d575..b4a5cdf9dbc57339b29530b343b5a582bcb39e36 100644 (file)
@@ -947,7 +947,7 @@ leave:
        ocfs2_free_dir_lookup_result(&orphan_insert);
        ocfs2_free_dir_lookup_result(&lookup);
 
-       if (status)
+       if (status && (status != -ENOTEMPTY))
                mlog_errno(status);
 
        return status;
@@ -2216,7 +2216,7 @@ out:
 
        brelse(orphan_dir_bh);
 
-       return 0;
+       return ret;
 }
 
 int ocfs2_create_inode_in_orphan(struct inode *dir,
index 3d2a7141b87a4b38a1aea383a68810ada1271404..9af0df15256e9405082fdb59d1a2c6fe97fb8e59 100644 (file)
@@ -83,7 +83,8 @@ static int do_make_slave(struct mount *mnt)
                if (peer_mnt == mnt)
                        peer_mnt = NULL;
        }
-       if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
+       if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
+           list_empty(&mnt->mnt_share))
                mnt_release_group_id(mnt);
 
        list_del_init(&mnt->mnt_share);
index dd51e50001fe7aff4e51ffac502d39158ae5581d..c3834dad09b3bce4dccec2180478d852ffddb70d 100644 (file)
@@ -2118,6 +2118,7 @@ static int show_timer(struct seq_file *m, void *v)
                nstr[notify & ~SIGEV_THREAD_ID],
                (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
                pid_nr_ns(timer->it_pid, tp->ns));
+       seq_printf(m, "ClockID: %d\n", timer->it_clock);
 
        return 0;
 }
index bd4b5a740ff1b9de6492f5da9adaf7e500d35b25..bdfabdaefdceab967df948cd3509a39990d3fe8b 100644 (file)
@@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait;
 
 static int kmsg_open(struct inode * inode, struct file * file)
 {
-       return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
+       return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
 }
 
 static int kmsg_release(struct inode * inode, struct file * file)
 {
-       (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
+       (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC);
        return 0;
 }
 
@@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
 {
        if ((file->f_flags & O_NONBLOCK) &&
-           !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
+           !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
                return -EAGAIN;
-       return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
+       return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
 }
 
 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
 {
        poll_wait(file, &log_wait, wait);
-       if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
+       if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
                return POLLIN | POLLRDNORM;
        return 0;
 }
index 8798d065e400fca8303e25b5665b0c1d7eaa579f..afa6be6fc39759edc22abca58d3e21677576b0b4 100644 (file)
@@ -120,7 +120,7 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
        struct inode *inode = file_inode(filp);
        struct super_block *s = inode->i_sb;
        struct qnx6_sb_info *sbi = QNX6_SB(s);
-       loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1);
+       loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
        unsigned long npages = dir_pages(inode);
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
index 66c53b642a880a1da6bf70c7d85a844dd18cbcea..6c2d136561cbd5db121285d97ee335fdf20db081 100644 (file)
@@ -204,6 +204,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
                                next_pos = deh_offset(deh) + 1;
 
                                if (item_moved(&tmp_ih, &path_to_entry)) {
+                                       set_cpu_key_k_offset(&pos_key,
+                                                            next_pos);
                                        goto research;
                                }
                        }       /* for */
index 77d6d47abc838be3a18acc837f9f97816974e6e1..f844533792ee99d7c7191f070869d1462cea52ab 100644 (file)
@@ -1811,11 +1811,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                                  TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
        memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
        args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
-       if (insert_inode_locked4(inode, args.objectid,
-                            reiserfs_find_actor, &args) < 0) {
+
+       reiserfs_write_unlock(inode->i_sb);
+       err = insert_inode_locked4(inode, args.objectid,
+                            reiserfs_find_actor, &args);
+       reiserfs_write_lock(inode->i_sb);
+       if (err) {
                err = -EINVAL;
                goto out_bad_inode;
        }
+
        if (old_format_only(sb))
                /* not a perfect generation count, as object ids can be reused, but
                 ** this is as good as reiserfs can do right now.
index 4cce1d9552fbbcd5f23447245a8b6f942c6fe89c..821bcf70e467432e14868b0c5bb00cb1b6940598 100644 (file)
@@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
 static int chown_one_xattr(struct dentry *dentry, void *data)
 {
        struct iattr *attrs = data;
-       return reiserfs_setattr(dentry, attrs);
+       int ia_valid = attrs->ia_valid;
+       int err;
+
+       /*
+        * We only want the ownership bits. Otherwise, we'll do
+        * things like change a directory to a regular file if
+        * ATTR_MODE is set.
+        */
+       attrs->ia_valid &= (ATTR_UID|ATTR_GID);
+       err = reiserfs_setattr(dentry, attrs);
+       attrs->ia_valid = ia_valid;
+
+       return err;
 }
 
 /* No i_mutex, but the inode is unconnected. */
index d7c01ef64edab42ed59d022a2cf36f917e1935c0..6c8767fdfc6a287b0a9d201b4048d89b06fecaad 100644 (file)
@@ -443,6 +443,9 @@ int reiserfs_acl_chmod(struct inode *inode)
        int depth;
        int error;
 
+       if (IS_PRIVATE(inode))
+               return 0;
+
        if (S_ISLNK(inode->i_mode))
                return -EOPNOTSUPP;
 
index 8c1c96c27062a504bfc151f33d335fcd8f6779b5..79b876eb91dad25c91ea3715592c2b9f6655852b 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
 #include <linux/sched/rt.h>
+#include <net/ll_poll.h>
 
 #include <asm/uaccess.h>
 
@@ -384,9 +385,10 @@ get_max:
 #define POLLEX_SET (POLLPRI)
 
 static inline void wait_key_set(poll_table *wait, unsigned long in,
-                               unsigned long out, unsigned long bit)
+                               unsigned long out, unsigned long bit,
+                               unsigned int ll_flag)
 {
-       wait->_key = POLLEX_SET;
+       wait->_key = POLLEX_SET | ll_flag;
        if (in & bit)
                wait->_key |= POLLIN_SET;
        if (out & bit)
@@ -400,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        poll_table *wait;
        int retval, i, timed_out = 0;
        unsigned long slack = 0;
+       unsigned int ll_flag = POLL_LL;
+       u64 ll_time = ll_end_time();
 
        rcu_read_lock();
        retval = max_select_fd(n, fds);
@@ -422,6 +426,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
+               bool can_ll = false;
 
                inp = fds->in; outp = fds->out; exp = fds->ex;
                rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
@@ -449,7 +454,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                                        f_op = f.file->f_op;
                                        mask = DEFAULT_POLLMASK;
                                        if (f_op && f_op->poll) {
-                                               wait_key_set(wait, in, out, bit);
+                                               wait_key_set(wait, in, out,
+                                                            bit, ll_flag);
                                                mask = (*f_op->poll)(f.file, wait);
                                        }
                                        fdput(f);
@@ -468,6 +474,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
+                                       if (mask & POLL_LL)
+                                               can_ll = true;
+                                       /* got something, stop busy polling */
+                                       if (retval)
+                                               ll_flag = 0;
                                }
                        }
                        if (res_in)
@@ -486,6 +497,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                        break;
                }
 
+               if (can_ll && can_poll_ll(ll_time))
+                       continue;
+
                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
@@ -717,7 +731,8 @@ struct poll_list {
  * pwait poll_table will be used by the fd-provided poll handler for waiting,
  * if pwait->_qproc is non-NULL.
  */
-static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
+static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
+                                    bool *can_ll, unsigned int ll_flag)
 {
        unsigned int mask;
        int fd;
@@ -731,7 +746,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
                        mask = DEFAULT_POLLMASK;
                        if (f.file->f_op && f.file->f_op->poll) {
                                pwait->_key = pollfd->events|POLLERR|POLLHUP;
+                               pwait->_key |= ll_flag;
                                mask = f.file->f_op->poll(f.file, pwait);
+                               if (mask & POLL_LL)
+                                       *can_ll = true;
                        }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
@@ -750,6 +768,8 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        unsigned long slack = 0;
+       unsigned int ll_flag = POLL_LL;
+       u64 ll_time = ll_end_time();
 
        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -762,6 +782,7 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
 
        for (;;) {
                struct poll_list *walk;
+               bool can_ll = false;
 
                for (walk = list; walk != NULL; walk = walk->next) {
                        struct pollfd * pfd, * pfd_end;
@@ -776,9 +797,10 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
-                               if (do_pollfd(pfd, pt)) {
+                               if (do_pollfd(pfd, pt, &can_ll, ll_flag)) {
                                        count++;
                                        pt->_qproc = NULL;
+                                       ll_flag = 0;
                                }
                        }
                }
@@ -795,6 +817,8 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
                if (count || timed_out)
                        break;
 
+               if (can_ll && can_poll_ll(ll_time))
+                       continue;
                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
index 1d32f1d5276339e96b0034e13050eaeb39dba653..306d883d89bc7d6420ca4b5b8c5f848e573249bf 100644 (file)
@@ -21,6 +21,8 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_vnodeops.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
 #include "xfs_trace.h"
 #include <linux/slab.h>
 #include <linux/xattr.h>
@@ -34,7 +36,9 @@
  */
 
 STATIC struct posix_acl *
-xfs_acl_from_disk(struct xfs_acl *aclp)
+xfs_acl_from_disk(
+       struct xfs_acl  *aclp,
+       int             max_entries)
 {
        struct posix_acl_entry *acl_e;
        struct posix_acl *acl;
@@ -42,7 +46,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        unsigned int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
-       if (count > XFS_ACL_MAX_ENTRIES)
+       if (count > max_entries)
                return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
@@ -108,9 +112,9 @@ xfs_get_acl(struct inode *inode, int type)
        struct xfs_inode *ip = XFS_I(inode);
        struct posix_acl *acl;
        struct xfs_acl *xfs_acl;
-       int len = sizeof(struct xfs_acl);
        unsigned char *ea_name;
        int error;
+       int len;
 
        acl = get_cached_acl(inode, type);
        if (acl != ACL_NOT_CACHED)
@@ -133,8 +137,8 @@ xfs_get_acl(struct inode *inode, int type)
         * If we have a cached ACLs value just return it, not need to
         * go out to the disk.
         */
-
-       xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+       len = XFS_ACL_MAX_SIZE(ip->i_mount);
+       xfs_acl = kzalloc(len, GFP_KERNEL);
        if (!xfs_acl)
                return ERR_PTR(-ENOMEM);
 
@@ -153,7 +157,7 @@ xfs_get_acl(struct inode *inode, int type)
                goto out;
        }
 
-       acl = xfs_acl_from_disk(xfs_acl);
+       acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
        if (IS_ERR(acl))
                goto out;
 
@@ -189,16 +193,17 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
 
        if (acl) {
                struct xfs_acl *xfs_acl;
-               int len;
+               int len = XFS_ACL_MAX_SIZE(ip->i_mount);
 
-               xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+               xfs_acl = kzalloc(len, GFP_KERNEL);
                if (!xfs_acl)
                        return -ENOMEM;
 
                xfs_acl_to_disk(xfs_acl, acl);
-               len = sizeof(struct xfs_acl) -
-                       (sizeof(struct xfs_acl_entry) *
-                        (XFS_ACL_MAX_ENTRIES - acl->a_count));
+
+               /* subtract away the unused acl entries */
+               len -= sizeof(struct xfs_acl_entry) *
+                        (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
 
                error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
                                len, ATTR_ROOT);
@@ -243,7 +248,7 @@ xfs_set_mode(struct inode *inode, umode_t mode)
 static int
 xfs_acl_exists(struct inode *inode, unsigned char *name)
 {
-       int len = sizeof(struct xfs_acl);
+       int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb));
 
        return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
                            ATTR_ROOT|ATTR_KERNOVAL) == 0);
@@ -379,7 +384,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
                goto out_release;
 
        error = -EINVAL;
-       if (acl->a_count > XFS_ACL_MAX_ENTRIES)
+       if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
                goto out_release;
 
        if (type == ACL_TYPE_ACCESS) {
index 39632d94135490ac94dee48ff5f037d29d5592ac..4016a567b83cc2e00d46b5f4629bfc83785a7dff 100644 (file)
@@ -22,19 +22,36 @@ struct inode;
 struct posix_acl;
 struct xfs_inode;
 
-#define XFS_ACL_MAX_ENTRIES 25
 #define XFS_ACL_NOT_PRESENT (-1)
 
 /* On-disk XFS access control list structure */
+struct xfs_acl_entry {
+       __be32  ae_tag;
+       __be32  ae_id;
+       __be16  ae_perm;
+       __be16  ae_pad;         /* fill the implicit hole in the structure */
+};
+
 struct xfs_acl {
-       __be32          acl_cnt;
-       struct xfs_acl_entry {
-               __be32  ae_tag;
-               __be32  ae_id;
-               __be16  ae_perm;
-       } acl_entry[XFS_ACL_MAX_ENTRIES];
+       __be32                  acl_cnt;
+       struct xfs_acl_entry    acl_entry[0];
 };
 
+/*
+ * The number of ACL entries allowed is defined by the on-disk format.
+ * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
+ * limited only by the maximum size of the xattr that stores the information.
+ */
+#define XFS_ACL_MAX_ENTRIES(mp)        \
+       (xfs_sb_version_hascrc(&mp->m_sb) \
+               ?  (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
+                                               sizeof(struct xfs_acl_entry) \
+               : 25)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+       (sizeof(struct xfs_acl) + \
+               sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
+
 /* On-disk XFS extended attribute names */
 #define SGI_ACL_FILE           (unsigned char *)"SGI_ACL_FILE"
 #define SGI_ACL_DEFAULT                (unsigned char *)"SGI_ACL_DEFAULT"
index 2b2691b7342890e64e957d616dfb1c4aa2efc8c1..41a695048be7b09b87baf4517fd8124b6d7a8ffb 100644 (file)
@@ -725,6 +725,25 @@ xfs_convert_page(
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));
 
+       /*
+        * If the current map does not span the entire page we are about to try
+        * to write, then give up. The only way we can write a page that spans
+        * multiple mappings in a single writeback iteration is via the
+        * xfs_vm_writepage() function. Data integrity writeback requires the
+        * entire page to be written in a single attempt, otherwise the part of
+        * the page we don't write here doesn't get written as part of the data
+        * integrity sync.
+        *
+        * For normal writeback, we also don't attempt to write partial pages
+        * here as it simply means that write_cache_pages() will see it under
+        * writeback and ignore the page until some point in the future, at
+        * which time this will be the only page in the file that needs
+        * writeback.  Hence for more optimal IO patterns, we should always
+        * avoid partial page writeback due to multiple mappings on a page here.
+        */
+       if (!xfs_imap_valid(inode, imap, end_offset))
+               goto fail_unlock_page;
+
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
index 08d5457c948e051866217792e2fcbc12d1f5c726..31d3cd12926918978f922edb6e0f01c22d29d864 100644 (file)
@@ -931,20 +931,22 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
  */
 int
 xfs_attr_shortform_allfit(
-       struct xfs_buf  *bp,
-       struct xfs_inode *dp)
+       struct xfs_buf          *bp,
+       struct xfs_inode        *dp)
 {
-       xfs_attr_leafblock_t *leaf;
-       xfs_attr_leaf_entry_t *entry;
+       struct xfs_attr_leafblock *leaf;
+       struct xfs_attr_leaf_entry *entry;
        xfs_attr_leaf_name_local_t *name_loc;
-       int bytes, i;
+       struct xfs_attr3_icleaf_hdr leafhdr;
+       int                     bytes;
+       int                     i;
 
        leaf = bp->b_addr;
-       ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+       xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+       entry = xfs_attr3_leaf_entryp(leaf);
 
-       entry = &leaf->entries[0];
        bytes = sizeof(struct xfs_attr_sf_hdr);
-       for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+       for (i = 0; i < leafhdr.count; entry++, i++) {
                if (entry->flags & XFS_ATTR_INCOMPLETE)
                        continue;               /* don't copy partial entries */
                if (!(entry->flags & XFS_ATTR_LOCAL))
@@ -954,15 +956,15 @@ xfs_attr_shortform_allfit(
                        return(0);
                if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
                        return(0);
-               bytes += sizeof(struct xfs_attr_sf_entry)-1
+               bytes += sizeof(struct xfs_attr_sf_entry) - 1
                                + name_loc->namelen
                                + be16_to_cpu(name_loc->valuelen);
        }
        if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
            (bytes == sizeof(struct xfs_attr_sf_hdr)))
-               return(-1);
-       return(xfs_attr_shortform_bytesfit(dp, bytes));
+               return -1;
+       return xfs_attr_shortform_bytesfit(dp, bytes);
 }
 
 /*
@@ -1410,7 +1412,7 @@ xfs_attr3_leaf_add_work(
                name_rmt->valuelen = 0;
                name_rmt->valueblk = 0;
                args->rmtblkno = 1;
-               args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+               args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
        }
        xfs_trans_log_buf(args->trans, bp,
             XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -1443,11 +1445,12 @@ xfs_attr3_leaf_add_work(
 STATIC void
 xfs_attr3_leaf_compact(
        struct xfs_da_args      *args,
-       struct xfs_attr3_icleaf_hdr *ichdr_d,
+       struct xfs_attr3_icleaf_hdr *ichdr_dst,
        struct xfs_buf          *bp)
 {
-       xfs_attr_leafblock_t    *leaf_s, *leaf_d;
-       struct xfs_attr3_icleaf_hdr ichdr_s;
+       struct xfs_attr_leafblock *leaf_src;
+       struct xfs_attr_leafblock *leaf_dst;
+       struct xfs_attr3_icleaf_hdr ichdr_src;
        struct xfs_trans        *trans = args->trans;
        struct xfs_mount        *mp = trans->t_mountp;
        char                    *tmpbuffer;
@@ -1455,29 +1458,38 @@ xfs_attr3_leaf_compact(
        trace_xfs_attr_leaf_compact(args);
 
        tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
-       ASSERT(tmpbuffer != NULL);
        memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
        memset(bp->b_addr, 0, XFS_LBSIZE(mp));
+       leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
+       leaf_dst = bp->b_addr;
 
        /*
-        * Copy basic information
+        * Copy the on-disk header back into the destination buffer to ensure
+        * all the information in the header that is not part of the incore
+        * header structure is preserved.
         */
-       leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
-       leaf_d = bp->b_addr;
-       ichdr_s = *ichdr_d;     /* struct copy */
-       ichdr_d->firstused = XFS_LBSIZE(mp);
-       ichdr_d->usedbytes = 0;
-       ichdr_d->count = 0;
-       ichdr_d->holes = 0;
-       ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s);
-       ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
+       memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
+
+       /* Initialise the incore headers */
+       ichdr_src = *ichdr_dst; /* struct copy */
+       ichdr_dst->firstused = XFS_LBSIZE(mp);
+       ichdr_dst->usedbytes = 0;
+       ichdr_dst->count = 0;
+       ichdr_dst->holes = 0;
+       ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
+       ichdr_dst->freemap[0].size = ichdr_dst->firstused -
+                                               ichdr_dst->freemap[0].base;
+
+
+       /* write the header back to initialise the underlying buffer */
+       xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
 
        /*
         * Copy all entry's in the same (sorted) order,
         * but allocate name/value pairs packed and in sequence.
         */
-       xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0,
-                               ichdr_s.count, mp);
+       xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0,
+                               ichdr_src.count, mp);
        /*
         * this logs the entire buffer, but the caller must write the header
         * back to the buffer when it is finished modifying it.
@@ -2179,14 +2191,24 @@ xfs_attr3_leaf_unbalance(
                struct xfs_attr_leafblock *tmp_leaf;
                struct xfs_attr3_icleaf_hdr tmphdr;
 
-               tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP);
-               memset(tmp_leaf, 0, state->blocksize);
-               memset(&tmphdr, 0, sizeof(tmphdr));
+               tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP);
+
+               /*
+                * Copy the header into the temp leaf so that all the stuff
+                * not in the incore header is present and gets copied back in
+                * once we've moved all the entries.
+                */
+               memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
 
+               memset(&tmphdr, 0, sizeof(tmphdr));
                tmphdr.magic = savehdr.magic;
                tmphdr.forw = savehdr.forw;
                tmphdr.back = savehdr.back;
                tmphdr.firstused = state->blocksize;
+
+               /* write the header to the temp buffer to initialise it */
+               xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
+
                if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
                                         drop_blk->bp, &drophdr)) {
                        xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
@@ -2330,9 +2352,11 @@ xfs_attr3_leaf_lookup_int(
                        if (!xfs_attr_namesp_match(args->flags, entry->flags))
                                continue;
                        args->index = probe;
+                       args->valuelen = be32_to_cpu(name_rmt->valuelen);
                        args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
-                       args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
-                                                  be32_to_cpu(name_rmt->valuelen));
+                       args->rmtblkcnt = xfs_attr3_rmt_blocks(
+                                                       args->dp->i_mount,
+                                                       args->valuelen);
                        return XFS_ERROR(EEXIST);
                }
        }
@@ -2383,7 +2407,8 @@ xfs_attr3_leaf_getvalue(
                ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
                valuelen = be32_to_cpu(name_rmt->valuelen);
                args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
-               args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
+               args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
+                                                      valuelen);
                if (args->flags & ATTR_KERNOVAL) {
                        args->valuelen = valuelen;
                        return 0;
@@ -2709,7 +2734,8 @@ xfs_attr3_leaf_list_int(
                                args.valuelen = valuelen;
                                args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
                                args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
-                               args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
+                               args.rmtblkcnt = xfs_attr3_rmt_blocks(
+                                                       args.dp->i_mount, valuelen);
                                retval = xfs_attr_rmtval_get(&args);
                                if (retval)
                                        return retval;
@@ -3232,7 +3258,7 @@ xfs_attr3_leaf_inactive(
                        name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
                        if (name_rmt->valueblk) {
                                lp->valueblk = be32_to_cpu(name_rmt->valueblk);
-                               lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
+                               lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
                                                    be32_to_cpu(name_rmt->valuelen));
                                lp++;
                        }
index f9d7846097e229cdef514b747cbbc43251166004..444a7704596c409f43f0ec495c9e6cc9838460c4 100644 (file)
@@ -128,6 +128,7 @@ struct xfs_attr3_leaf_hdr {
        __u8                    holes;
        __u8                    pad1;
        struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+       __be32                  pad2;           /* 64 bit alignment */
 };
 
 #define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
index dee84466dcc934add7391861d6c83631a63bca62..ef6b0c124528f6bff8d59c0fee5fa31a1d5dcc8b 100644 (file)
  * Each contiguous block has a header, so it is not just a simple attribute
  * length to FSB conversion.
  */
-static int
+int
 xfs_attr3_rmt_blocks(
        struct xfs_mount *mp,
        int             attrlen)
 {
-       int             buflen = XFS_ATTR3_RMT_BUF_SPACE(mp,
-                                                        mp->m_sb.sb_blocksize);
-       return (attrlen + buflen - 1) / buflen;
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
+               return (attrlen + buflen - 1) / buflen;
+       }
+       return XFS_B_TO_FSB(mp, attrlen);
+}
+
+/*
+ * Checking of the remote attribute header is split into two parts. The verifier
+ * does CRC, location and bounds checking, the unpacking function checks the
+ * attribute parameters and owner.
+ */
+static bool
+xfs_attr3_rmt_hdr_ok(
+       struct xfs_mount        *mp,
+       void                    *ptr,
+       xfs_ino_t               ino,
+       uint32_t                offset,
+       uint32_t                size,
+       xfs_daddr_t             bno)
+{
+       struct xfs_attr3_rmt_hdr *rmt = ptr;
+
+       if (bno != be64_to_cpu(rmt->rm_blkno))
+               return false;
+       if (offset != be32_to_cpu(rmt->rm_offset))
+               return false;
+       if (size != be32_to_cpu(rmt->rm_bytes))
+               return false;
+       if (ino != be64_to_cpu(rmt->rm_owner))
+               return false;
+
+       /* ok */
+       return true;
 }
 
 static bool
 xfs_attr3_rmt_verify(
-       struct xfs_buf          *bp)
+       struct xfs_mount        *mp,
+       void                    *ptr,
+       int                     fsbsize,
+       xfs_daddr_t             bno)
 {
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-       struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+       struct xfs_attr3_rmt_hdr *rmt = ptr;
 
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return false;
@@ -70,7 +103,9 @@ xfs_attr3_rmt_verify(
                return false;
        if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
                return false;
-       if (bp->b_bn != be64_to_cpu(rmt->rm_blkno))
+       if (be64_to_cpu(rmt->rm_blkno) != bno)
+               return false;
+       if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
                return false;
        if (be32_to_cpu(rmt->rm_offset) +
                                be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
@@ -86,17 +121,40 @@ xfs_attr3_rmt_read_verify(
        struct xfs_buf  *bp)
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
+       char            *ptr;
+       int             len;
+       bool            corrupt = false;
+       xfs_daddr_t     bno;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;
 
-       if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-                             XFS_ATTR3_RMT_CRC_OFF) ||
-           !xfs_attr3_rmt_verify(bp)) {
+       ptr = bp->b_addr;
+       bno = bp->b_bn;
+       len = BBTOB(bp->b_length);
+       ASSERT(len >= XFS_LBSIZE(mp));
+
+       while (len > 0) {
+               if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
+                                     XFS_ATTR3_RMT_CRC_OFF)) {
+                       corrupt = true;
+                       break;
+               }
+               if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
+                       corrupt = true;
+                       break;
+               }
+               len -= XFS_LBSIZE(mp);
+               ptr += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
+       }
+
+       if (corrupt) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
                xfs_buf_ioerror(bp, EFSCORRUPTED);
-       }
+       } else
+               ASSERT(len == 0);
 }
 
 static void
@@ -105,23 +163,39 @@ xfs_attr3_rmt_write_verify(
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
        struct xfs_buf_log_item *bip = bp->b_fspriv;
+       char            *ptr;
+       int             len;
+       xfs_daddr_t     bno;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;
 
-       if (!xfs_attr3_rmt_verify(bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-               return;
-       }
+       ptr = bp->b_addr;
+       bno = bp->b_bn;
+       len = BBTOB(bp->b_length);
+       ASSERT(len >= XFS_LBSIZE(mp));
+
+       while (len > 0) {
+               if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
+                       XFS_CORRUPTION_ERROR(__func__,
+                                           XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+                       xfs_buf_ioerror(bp, EFSCORRUPTED);
+                       return;
+               }
+               if (bip) {
+                       struct xfs_attr3_rmt_hdr *rmt;
+
+                       rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+                       rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               }
+               xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
 
-       if (bip) {
-               struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
-               rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               len -= XFS_LBSIZE(mp);
+               ptr += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
        }
-       xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-                        XFS_ATTR3_RMT_CRC_OFF);
+       ASSERT(len == 0);
 }
 
 const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
@@ -129,15 +203,16 @@ const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
        .verify_write = xfs_attr3_rmt_write_verify,
 };
 
-static int
+STATIC int
 xfs_attr3_rmt_hdr_set(
        struct xfs_mount        *mp,
+       void                    *ptr,
        xfs_ino_t               ino,
        uint32_t                offset,
        uint32_t                size,
-       struct xfs_buf          *bp)
+       xfs_daddr_t             bno)
 {
-       struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+       struct xfs_attr3_rmt_hdr *rmt = ptr;
 
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return 0;
@@ -147,36 +222,107 @@ xfs_attr3_rmt_hdr_set(
        rmt->rm_bytes = cpu_to_be32(size);
        uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
        rmt->rm_owner = cpu_to_be64(ino);
-       rmt->rm_blkno = cpu_to_be64(bp->b_bn);
-       bp->b_ops = &xfs_attr3_rmt_buf_ops;
+       rmt->rm_blkno = cpu_to_be64(bno);
 
        return sizeof(struct xfs_attr3_rmt_hdr);
 }
 
 /*
- * Checking of the remote attribute header is split into two parts. the verifier
- * does CRC, location and bounds checking, the unpacking function checks the
- * attribute parameters and owner.
+ * Helper functions to copy attribute data in and out of the one disk extents
  */
-static bool
-xfs_attr3_rmt_hdr_ok(
-       struct xfs_mount        *mp,
-       xfs_ino_t               ino,
-       uint32_t                offset,
-       uint32_t                size,
-       struct xfs_buf          *bp)
+STATIC int
+xfs_attr_rmtval_copyout(
+       struct xfs_mount *mp,
+       struct xfs_buf  *bp,
+       xfs_ino_t       ino,
+       int             *offset,
+       int             *valuelen,
+       char            **dst)
 {
-       struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+       char            *src = bp->b_addr;
+       xfs_daddr_t     bno = bp->b_bn;
+       int             len = BBTOB(bp->b_length);
 
-       if (offset != be32_to_cpu(rmt->rm_offset))
-               return false;
-       if (size != be32_to_cpu(rmt->rm_bytes))
-               return false;
-       if (ino != be64_to_cpu(rmt->rm_owner))
-               return false;
+       ASSERT(len >= XFS_LBSIZE(mp));
 
-       /* ok */
-       return true;
+       while (len > 0 && *valuelen > 0) {
+               int hdr_size = 0;
+               int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
+
+               byte_cnt = min_t(int, *valuelen, byte_cnt);
+
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+                       if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
+                                                 byte_cnt, bno)) {
+                               xfs_alert(mp,
+"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
+                                       bno, *offset, byte_cnt, ino);
+                               return EFSCORRUPTED;
+                       }
+                       hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
+               }
+
+               memcpy(*dst, src + hdr_size, byte_cnt);
+
+               /* roll buffer forwards */
+               len -= XFS_LBSIZE(mp);
+               src += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
+
+               /* roll attribute data forwards */
+               *valuelen -= byte_cnt;
+               *dst += byte_cnt;
+               *offset += byte_cnt;
+       }
+       return 0;
+}
+
+STATIC void
+xfs_attr_rmtval_copyin(
+       struct xfs_mount *mp,
+       struct xfs_buf  *bp,
+       xfs_ino_t       ino,
+       int             *offset,
+       int             *valuelen,
+       char            **src)
+{
+       char            *dst = bp->b_addr;
+       xfs_daddr_t     bno = bp->b_bn;
+       int             len = BBTOB(bp->b_length);
+
+       ASSERT(len >= XFS_LBSIZE(mp));
+
+       while (len > 0 && *valuelen > 0) {
+               int hdr_size;
+               int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
+
+               byte_cnt = min(*valuelen, byte_cnt);
+               hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
+                                                byte_cnt, bno);
+
+               memcpy(dst + hdr_size, *src, byte_cnt);
+
+               /*
+                * If this is the last block, zero the remainder of it.
+                * Check that we are actually the last block, too.
+                */
+               if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
+                       ASSERT(*valuelen - byte_cnt == 0);
+                       ASSERT(len == XFS_LBSIZE(mp));
+                       memset(dst + hdr_size + byte_cnt, 0,
+                                       XFS_LBSIZE(mp) - hdr_size - byte_cnt);
+               }
+
+               /* roll buffer forwards */
+               len -= XFS_LBSIZE(mp);
+               dst += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
+
+               /* roll attribute data forwards */
+               *valuelen -= byte_cnt;
+               *src += byte_cnt;
+               *offset += byte_cnt;
+       }
 }
 
 /*
@@ -190,13 +336,12 @@ xfs_attr_rmtval_get(
        struct xfs_bmbt_irec    map[ATTR_RMTVALUE_MAPSIZE];
        struct xfs_mount        *mp = args->dp->i_mount;
        struct xfs_buf          *bp;
-       xfs_daddr_t             dblkno;
        xfs_dablk_t             lblkno = args->rmtblkno;
-       void                    *dst = args->value;
+       char                    *dst = args->value;
        int                     valuelen = args->valuelen;
        int                     nmap;
        int                     error;
-       int                     blkcnt;
+       int                     blkcnt = args->rmtblkcnt;
        int                     i;
        int                     offset = 0;
 
@@ -207,52 +352,36 @@ xfs_attr_rmtval_get(
        while (valuelen > 0) {
                nmap = ATTR_RMTVALUE_MAPSIZE;
                error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
-                                      args->rmtblkcnt, map, &nmap,
+                                      blkcnt, map, &nmap,
                                       XFS_BMAPI_ATTRFORK);
                if (error)
                        return error;
                ASSERT(nmap >= 1);
 
                for (i = 0; (i < nmap) && (valuelen > 0); i++) {
-                       int     byte_cnt;
-                       char    *src;
+                       xfs_daddr_t     dblkno;
+                       int             dblkcnt;
 
                        ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
                               (map[i].br_startblock != HOLESTARTBLOCK));
                        dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
-                       blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+                       dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
-                                                  dblkno, blkcnt, 0, &bp,
+                                                  dblkno, dblkcnt, 0, &bp,
                                                   &xfs_attr3_rmt_buf_ops);
                        if (error)
                                return error;
 
-                       byte_cnt = min_t(int, valuelen, BBTOB(bp->b_length));
-                       byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
-
-                       src = bp->b_addr;
-                       if (xfs_sb_version_hascrc(&mp->m_sb)) {
-                               if (!xfs_attr3_rmt_hdr_ok(mp, args->dp->i_ino,
-                                                       offset, byte_cnt, bp)) {
-                                       xfs_alert(mp,
-"remote attribute header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
-                                               offset, byte_cnt, args->dp->i_ino);
-                                       xfs_buf_relse(bp);
-                                       return EFSCORRUPTED;
-
-                               }
-
-                               src += sizeof(struct xfs_attr3_rmt_hdr);
-                       }
-
-                       memcpy(dst, src, byte_cnt);
+                       error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
+                                                       &offset, &valuelen,
+                                                       &dst);
                        xfs_buf_relse(bp);
+                       if (error)
+                               return error;
 
-                       offset += byte_cnt;
-                       dst += byte_cnt;
-                       valuelen -= byte_cnt;
-
+                       /* roll attribute extent map forwards */
                        lblkno += map[i].br_blockcount;
+                       blkcnt -= map[i].br_blockcount;
                }
        }
        ASSERT(valuelen == 0);
@@ -270,17 +399,13 @@ xfs_attr_rmtval_set(
        struct xfs_inode        *dp = args->dp;
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_bmbt_irec    map;
-       struct xfs_buf          *bp;
-       xfs_daddr_t             dblkno;
        xfs_dablk_t             lblkno;
        xfs_fileoff_t           lfileoff = 0;
-       void                    *src = args->value;
+       char                    *src = args->value;
        int                     blkcnt;
        int                     valuelen;
        int                     nmap;
        int                     error;
-       int                     hdrcnt = 0;
-       bool                    crcs = xfs_sb_version_hascrc(&mp->m_sb);
        int                     offset = 0;
 
        trace_xfs_attr_rmtval_set(args);
@@ -289,24 +414,14 @@ xfs_attr_rmtval_set(
         * Find a "hole" in the attribute address space large enough for
         * us to drop the new attribute's value into. Because CRC enabled
         * attributes have headers, we can't just do a straight byte to FSB
-        * conversion. We calculate the worst case block count in this case
-        * and we may not need that many, so we have to handle this when
-        * allocating the blocks below. 
+        * conversion and have to take the header space into account.
         */
-       if (!crcs)
-               blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
-       else
-               blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
-
+       blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
        error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
                                                   XFS_ATTR_FORK);
        if (error)
                return error;
 
-       /* Start with the attribute data. We'll allocate the rest afterwards. */
-       if (crcs)
-               blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
-
        args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
        args->rmtblkcnt = blkcnt;
 
@@ -349,26 +464,6 @@ xfs_attr_rmtval_set(
                       (map.br_startblock != HOLESTARTBLOCK));
                lblkno += map.br_blockcount;
                blkcnt -= map.br_blockcount;
-               hdrcnt++;
-
-               /*
-                * If we have enough blocks for the attribute data, calculate
-                * how many extra blocks we need for headers. We might run
-                * through this multiple times in the case that the additional
-                * headers in the blocks needed for the data fragments spills
-                * into requiring more blocks. e.g. for 512 byte blocks, we'll
-                * spill for another block every 9 headers we require in this
-                * loop.
-                */
-               if (crcs && blkcnt == 0) {
-                       int total_len;
-
-                       total_len = args->valuelen +
-                                   hdrcnt * sizeof(struct xfs_attr3_rmt_hdr);
-                       blkcnt = XFS_B_TO_FSB(mp, total_len);
-                       blkcnt -= args->rmtblkcnt;
-                       args->rmtblkcnt += blkcnt;
-               }
 
                /*
                 * Start the next trans in the chain.
@@ -385,18 +480,19 @@ xfs_attr_rmtval_set(
         * the INCOMPLETE flag.
         */
        lblkno = args->rmtblkno;
+       blkcnt = args->rmtblkcnt;
        valuelen = args->valuelen;
        while (valuelen > 0) {
-               int     byte_cnt;
-               char    *buf;
+               struct xfs_buf  *bp;
+               xfs_daddr_t     dblkno;
+               int             dblkcnt;
+
+               ASSERT(blkcnt > 0);
 
-               /*
-                * Try to remember where we decided to put the value.
-                */
                xfs_bmap_init(args->flist, args->firstblock);
                nmap = 1;
                error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
-                                      args->rmtblkcnt, &map, &nmap,
+                                      blkcnt, &map, &nmap,
                                       XFS_BMAPI_ATTRFORK);
                if (error)
                        return(error);
@@ -405,41 +501,27 @@ xfs_attr_rmtval_set(
                       (map.br_startblock != HOLESTARTBLOCK));
 
                dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
-               blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+               dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
+               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
                if (!bp)
                        return ENOMEM;
                bp->b_ops = &xfs_attr3_rmt_buf_ops;
 
-               byte_cnt = BBTOB(bp->b_length);
-               byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
-               if (valuelen < byte_cnt)
-                       byte_cnt = valuelen;
-
-               buf = bp->b_addr;
-               buf += xfs_attr3_rmt_hdr_set(mp, dp->i_ino, offset,
-                                            byte_cnt, bp);
-               memcpy(buf, src, byte_cnt);
-
-               if (byte_cnt < BBTOB(bp->b_length))
-                       xfs_buf_zero(bp, byte_cnt,
-                                    BBTOB(bp->b_length) - byte_cnt);
+               xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
+                                      &valuelen, &src);
 
                error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
                xfs_buf_relse(bp);
                if (error)
                        return error;
 
-               src += byte_cnt;
-               valuelen -= byte_cnt;
-               offset += byte_cnt;
-               hdrcnt--;
 
+               /* roll attribute extent map forwards */
                lblkno += map.br_blockcount;
+               blkcnt -= map.br_blockcount;
        }
        ASSERT(valuelen == 0);
-       ASSERT(hdrcnt == 0);
        return 0;
 }
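
The get, set and remove paths in this series now share one walking pattern: blkcnt starts from args->rmtblkcnt, each xfs_bmapi_read() maps at most the remaining count, and after every extent both the logical block and the remaining count are rolled forward ("roll attribute extent map forwards"). A rough standalone C model of that bookkeeping, with a made-up extent list standing in for struct xfs_bmbt_irec:

#include <stdio.h>

/* made-up stand-in for struct xfs_bmbt_irec */
struct extent {
	long	startblock;	/* physical start, in filesystem blocks */
	int	blockcount;	/* extent length, in filesystem blocks */
};

int main(void)
{
	/* pretend the remote value was allocated as three extents */
	struct extent	map[] = { { 100, 4 }, { 220, 2 }, { 512, 3 } };
	int		nmap = sizeof(map) / sizeof(map[0]);
	long		lblkno = 37;	/* args->rmtblkno */
	int		blkcnt = 9;	/* args->rmtblkcnt */
	int		i;

	for (i = 0; i < nmap && blkcnt > 0; i++) {
		printf("extent %d: lblkno %ld -> %d blocks at %ld\n",
		       i, lblkno, map[i].blockcount, map[i].startblock);
		/* roll attribute extent map forwards */
		lblkno += map[i].blockcount;
		blkcnt -= map[i].blockcount;
	}
	return 0;
}
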
 
@@ -448,33 +530,40 @@ xfs_attr_rmtval_set(
  * out-of-line buffer that it is stored on.
  */
 int
-xfs_attr_rmtval_remove(xfs_da_args_t *args)
+xfs_attr_rmtval_remove(
+       struct xfs_da_args      *args)
 {
-       xfs_mount_t *mp;
-       xfs_bmbt_irec_t map;
-       xfs_buf_t *bp;
-       xfs_daddr_t dblkno;
-       xfs_dablk_t lblkno;
-       int valuelen, blkcnt, nmap, error, done, committed;
+       struct xfs_mount        *mp = args->dp->i_mount;
+       xfs_dablk_t             lblkno;
+       int                     blkcnt;
+       int                     error;
+       int                     done;
 
        trace_xfs_attr_rmtval_remove(args);
 
-       mp = args->dp->i_mount;
-
        /*
-        * Roll through the "value", invalidating the attribute value's
-        * blocks.
+        * Roll through the "value", invalidating the attribute value's blocks.
+        * Note that args->rmtblkcnt is the minimum number of data blocks we'll
+        * see for a CRC enabled remote attribute. Each extent will have a
+        * header, and so we may have more blocks than we realise here.  If we
+        * fail to map the blocks correctly, we'll have problems with the buffer
+        * lookups.
         */
        lblkno = args->rmtblkno;
-       valuelen = args->rmtblkcnt;
-       while (valuelen > 0) {
+       blkcnt = args->rmtblkcnt;
+       while (blkcnt > 0) {
+               struct xfs_bmbt_irec    map;
+               struct xfs_buf          *bp;
+               xfs_daddr_t             dblkno;
+               int                     dblkcnt;
+               int                     nmap;
+
                /*
                 * Try to remember where we decided to put the value.
                 */
                nmap = 1;
                error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
-                                      args->rmtblkcnt, &map, &nmap,
-                                      XFS_BMAPI_ATTRFORK);
+                                      blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
                if (error)
                        return(error);
                ASSERT(nmap == 1);
@@ -482,21 +571,20 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
                       (map.br_startblock != HOLESTARTBLOCK));
 
                dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
-               blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+               dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
                /*
                 * If the "remote" value is in the cache, remove it.
                 */
-               bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
+               bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
                if (bp) {
                        xfs_buf_stale(bp);
                        xfs_buf_relse(bp);
                        bp = NULL;
                }
 
-               valuelen -= map.br_blockcount;
-
                lblkno += map.br_blockcount;
+               blkcnt -= map.br_blockcount;
        }
 
        /*
@@ -506,6 +594,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
        blkcnt = args->rmtblkcnt;
        done = 0;
        while (!done) {
+               int committed;
+
                xfs_bmap_init(args->flist, args->firstblock);
                error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
                                    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
index c7cca60a062a1ab1a45eb7f08632773e54d0a734..92a8fd7977cc2b48649ed3e5c22344fe83fcd8c1 100644 (file)
 
 #define XFS_ATTR3_RMT_MAGIC    0x5841524d      /* XARM */
 
+/*
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
 struct xfs_attr3_rmt_hdr {
        __be32  rm_magic;
        __be32  rm_offset;
@@ -39,6 +47,8 @@ struct xfs_attr3_rmt_hdr {
 
 extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
 
+int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
+
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
 int xfs_attr_rmtval_set(struct xfs_da_args *args);
 int xfs_attr_rmtval_remove(struct xfs_da_args *args);
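
With one header per filesystem block, the block count for a remote value becomes a ceiling division over the per-block payload space, which is what the newly exported xfs_attr3_rmt_blocks() has to compute. A standalone sketch of that arithmetic; the 4k block size and 56-byte header size are illustrative assumptions, not the on-disk numbers:

#include <stdio.h>

/* illustrative numbers only; the kernel derives these from the superblock */
#define BLOCK_SIZE	4096
#define RMT_HDR_SIZE	56	/* assumed size of the per-block header */

/* space left for attribute data once each block carries a header */
static int rmt_buf_space(int blocksize)
{
	return blocksize - RMT_HDR_SIZE;
}

/* analogue of the block-count calculation: blocks needed for attrlen bytes */
static int rmt_blocks(int attrlen)
{
	int space = rmt_buf_space(BLOCK_SIZE);

	return (attrlen + space - 1) / space;
}

int main(void)
{
	printf("64k value  -> %d blocks\n", rmt_blocks(65536));
	printf("4040 bytes -> %d blocks\n", rmt_blocks(4040));
	return 0;
}
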
index 8804b8a3c310a6866b5e37de3b7b555ebd96568d..0903960410a255c171a7b663ffb3ba4af9137074 100644 (file)
@@ -2544,7 +2544,17 @@ xfs_btree_new_iroot(
        if (error)
                goto error0;
 
+       /*
+        * we can't just memcpy() the root in for CRC enabled btree blocks.
+        * In that case we also have to ensure the blkno remains correct.
+        */
        memcpy(cblock, block, xfs_btree_block_len(cur));
+       if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+               if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+                       cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
+               else
+                       cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
+       }
 
        be16_add_cpu(&block->bb_level, 1);
        xfs_btree_set_numrecs(block, 1);
index 82b70bda9f47a51eb3f31a50ae4fcd32f6c57047..1b2472a46e46b96e31e0615f670120218ed7cf24 100644 (file)
@@ -513,6 +513,7 @@ _xfs_buf_find(
                xfs_alert(btp->bt_mount,
                          "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
                          __func__, blkno, eofs);
+               WARN_ON(1);
                return NULL;
        }
 
@@ -1649,7 +1650,7 @@ xfs_alloc_buftarg(
 {
        xfs_buftarg_t           *btp;
 
-       btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+       btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
 
        btp->bt_mount = mp;
        btp->bt_dev =  bdev->bd_dev;
index cf263476d6b43591c5786045857f6e4625bde4ce..4ec431777048740528d0d1b96ca7f52e6ba69d01 100644 (file)
@@ -262,12 +262,7 @@ xfs_buf_item_format_segment(
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
                        vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-/*
- * You would think we need to bump the nvecs here too, but we do not
- * this number is used by recovery, and it gets confused by the boundary
- * split here
- *                     nvecs++;
- */
+                       nvecs++;
                        vecp++;
                        first_bit = next_bit;
                        last_bit = next_bit;
index 9b26a99ebfe917ed67123a77794751fc2b72cecd..0b8b2a13cd24debe493c8982679a2c565ebae5a1 100644 (file)
@@ -270,6 +270,7 @@ xfs_da3_node_read_verify(
                                break;
                        return;
                case XFS_ATTR_LEAF_MAGIC:
+               case XFS_ATTR3_LEAF_MAGIC:
                        bp->b_ops = &xfs_attr3_leaf_buf_ops;
                        bp->b_ops->verify_read(bp);
                        return;
@@ -2464,7 +2465,8 @@ xfs_buf_map_from_irec(
        ASSERT(nirecs >= 1);
 
        if (nirecs > 1) {
-               map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+               map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
+                                 KM_SLEEP | KM_NOFS);
                if (!map)
                        return ENOMEM;
                *mapp = map;
@@ -2520,7 +2522,8 @@ xfs_dabuf_map(
                 * Optimize the one-block case.
                 */
                if (nfsb != 1)
-                       irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
+                       irecs = kmem_zalloc(sizeof(irec) * nfsb,
+                                           KM_SLEEP | KM_NOFS);
 
                nirecs = nfsb;
                error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
index f852b082a0844abf3f8fbfea601589c0c162a9c7..c407e1ccff438a1db7e11cfdca1db4d45beea000 100644 (file)
@@ -219,6 +219,14 @@ xfs_swap_extents(
        int             taforkblks = 0;
        __uint64_t      tmp;
 
+       /*
+        * We have no way of updating owner information in the BMBT blocks for
+        * each inode on CRC enabled filesystems, so to avoid corrupting
+        * this metadata we simply don't allow extent swaps to occur.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               return XFS_ERROR(EINVAL);
+
        tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
        if (!tempifp) {
                error = XFS_ERROR(ENOMEM);
index a3b1bd841a8055249dc16a40f2f32614715ecaba..7826782b8d789461eef5a5443ae1d29944887815 100644 (file)
@@ -266,6 +266,7 @@ struct xfs_dir3_blk_hdr {
 struct xfs_dir3_data_hdr {
        struct xfs_dir3_blk_hdr hdr;
        xfs_dir2_data_free_t    best_free[XFS_DIR2_DATA_FD_COUNT];
+       __be32                  pad;    /* 64 bit alignment */
 };
 
 #define XFS_DIR3_DATA_CRC_OFF  offsetof(struct xfs_dir3_data_hdr, hdr.crc)
@@ -477,7 +478,7 @@ struct xfs_dir3_leaf_hdr {
        struct xfs_da3_blkinfo  info;           /* header for da routines */
        __be16                  count;          /* count of entries */
        __be16                  stale;          /* count of stale entries */
-       __be32                  pad;
+       __be32                  pad;            /* 64 bit alignment */
 };
 
 struct xfs_dir3_icleaf_hdr {
@@ -715,6 +716,7 @@ struct xfs_dir3_free_hdr {
        __be32                  firstdb;        /* db of first entry */
        __be32                  nvalid;         /* count of valid entries */
        __be32                  nused;          /* count of used entries */
+       __be32                  pad;            /* 64 bit alignment */
 };
 
 struct xfs_dir3_free {
index 721ba2fe8e54581aebf884e1cb31d2f02e1e2534..da71a1819d780cd35bd9817fd89deabd5feb3eb7 100644 (file)
@@ -1336,7 +1336,7 @@ xfs_dir2_leaf_getdents(
                                     mp->m_sb.sb_blocksize);
        map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
                                (length * sizeof(struct xfs_bmbt_irec)),
-                              KM_SLEEP);
+                              KM_SLEEP | KM_NOFS);
        map_info->map_size = length;
 
        /*
index 5246de4912d4b07d3fda2940bfe30a15d77a09f1..2226a00acd156118a2998ce37c2c95ae628503d9 100644 (file)
@@ -263,18 +263,19 @@ xfs_dir3_free_get_buf(
         * Initialize the new block to be empty, and remember
         * its first slot as our empty slot.
         */
-       hdr.magic = XFS_DIR2_FREE_MAGIC;
-       hdr.firstdb = 0;
-       hdr.nused = 0;
-       hdr.nvalid = 0;
+       memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr));
+       memset(&hdr, 0, sizeof(hdr));
+
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
 
                hdr.magic = XFS_DIR3_FREE_MAGIC;
+
                hdr3->hdr.blkno = cpu_to_be64(bp->b_bn);
                hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
                uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
-       }
+       } else
+               hdr.magic = XFS_DIR2_FREE_MAGIC;
        xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
        *bpp = bp;
        return 0;
@@ -1921,8 +1922,6 @@ xfs_dir2_node_addname_int(
                         */
                        freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
                                        xfs_dir3_free_max_bests(mp);
-                       free->hdr.nvalid = 0;
-                       free->hdr.nused = 0;
                } else {
                        free = fbp->b_addr;
                        bests = xfs_dir3_free_bests_p(mp, free);
index a41f8bf1da3788818131386f8ceb08516ffd1603..044e97a33c8d0a155f1ea026a20881f95dd95274 100644 (file)
@@ -249,8 +249,11 @@ xfs_qm_init_dquot_blk(
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_flags = type;
-               if (xfs_sb_version_hascrc(&mp->m_sb))
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+                       xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                        XFS_DQUOT_CRC_OFF);
+               }
        }
 
        xfs_trans_dquot_buf(tp, bp,
@@ -286,23 +289,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 }
 
-STATIC void
-xfs_dquot_buf_calc_crc(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
-       int                     i;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return;
-
-       for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) {
-               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                offsetof(struct xfs_dqblk, dd_crc));
-       }
-}
-
 STATIC bool
 xfs_dquot_buf_verify_crc(
        struct xfs_mount        *mp,
@@ -328,12 +314,11 @@ xfs_dquot_buf_verify_crc(
 
        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                offsetof(struct xfs_dqblk, dd_crc)))
+                                XFS_DQUOT_CRC_OFF))
                        return false;
                if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
                        return false;
        }
-
        return true;
 }
 
@@ -393,6 +378,11 @@ xfs_dquot_buf_read_verify(
        }
 }
 
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
 void
 xfs_dquot_buf_write_verify(
        struct xfs_buf  *bp)
@@ -404,7 +394,6 @@ xfs_dquot_buf_write_verify(
                xfs_buf_ioerror(bp, EFSCORRUPTED);
                return;
        }
-       xfs_dquot_buf_calc_crc(mp, bp);
 }
 
 const struct xfs_buf_ops xfs_dquot_buf_ops = {
@@ -1151,11 +1140,17 @@ xfs_qm_dqflush(
         * copy the lsn into the on-disk dquot now while we have the in memory
         * dquot here. This can't be done later in the write verifier as we
         * can't get access to the log item at that point in time.
+        *
+        * We also calculate the CRC here so that the on-disk dquot in the
+        * buffer always has a valid CRC. This ensures there is no possibility
+        * of a dquot without an up-to-date CRC getting to disk.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
 
                dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
+               xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
        }
 
        /*
index c0f375087efc3256cf5196ff08cdd2068ce18834..452920a3f03fb2e4405ce52e34587e55acfb7abe 100644 (file)
@@ -305,11 +305,12 @@ xfs_efi_release(xfs_efi_log_item_t        *efip,
 {
        ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
        if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) {
-               __xfs_efi_release(efip);
-
                /* recovery needs us to drop the EFI reference, too */
                if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
                        __xfs_efi_release(efip);
+
+               __xfs_efi_release(efip);
+               /* efip may now have been freed, do not reference it again. */
        }
 }
 
index 6dda3f949b04f5c8a9ad804d030c9b21f99b4d96..d04695545397308a6596f5109a72f8923fd79419 100644 (file)
@@ -236,6 +236,7 @@ typedef struct xfs_fsop_resblks {
 #define XFS_FSOP_GEOM_FLAGS_PROJID32   0x0800  /* 32-bit project IDs   */
 #define XFS_FSOP_GEOM_FLAGS_DIRV2CI    0x1000  /* ASCII only CI names  */
 #define XFS_FSOP_GEOM_FLAGS_LAZYSB     0x4000  /* lazy superblock counters */
+#define XFS_FSOP_GEOM_FLAGS_V5SB       0x8000  /* version 5 superblock */
 
 
 /*
index 87595b211da10743bbd07c7be8b4fd44d9f8212b..3c3644ea825b65edd813965296aa4c73d6214313 100644 (file)
@@ -99,7 +99,9 @@ xfs_fs_geometry(
                        (xfs_sb_version_hasattr2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
                        (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
-                               XFS_FSOP_GEOM_FLAGS_PROJID32 : 0);
+                               XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
+                       (xfs_sb_version_hascrc(&mp->m_sb) ?
+                               XFS_FSOP_GEOM_FLAGS_V5SB : 0);
                geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
                                mp->m_sb.sb_logsectsize : BBSIZE;
                geo->rtsectsize = mp->m_sb.sb_blocksize;
index efbe1accb6ca0b5e433e87df5fe0976469764e3d..7f7be5f98f52f743e04b4a915ef8466a62ec6237 100644 (file)
@@ -1638,6 +1638,10 @@ xfs_iunlink(
                dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
                offset = ip->i_imap.im_boffset +
                        offsetof(xfs_dinode_t, di_next_unlinked);
+
+               /* need to recalc the inode CRC if appropriate */
+               xfs_dinode_calc_crc(mp, dip);
+
                xfs_trans_inode_buf(tp, ibp);
                xfs_trans_log_buf(tp, ibp, offset,
                                  (offset + sizeof(xfs_agino_t) - 1));
@@ -1723,6 +1727,10 @@ xfs_iunlink_remove(
                        dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
                        offset = ip->i_imap.im_boffset +
                                offsetof(xfs_dinode_t, di_next_unlinked);
+
+                       /* need to recalc the inode CRC if appropriate */
+                       xfs_dinode_calc_crc(mp, dip);
+
                        xfs_trans_inode_buf(tp, ibp);
                        xfs_trans_log_buf(tp, ibp, offset,
                                          (offset + sizeof(xfs_agino_t) - 1));
@@ -1796,6 +1804,10 @@ xfs_iunlink_remove(
                        dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
                        offset = ip->i_imap.im_boffset +
                                offsetof(xfs_dinode_t, di_next_unlinked);
+
+                       /* need to recalc the inode CRC if appropriate */
+                       xfs_dinode_calc_crc(mp, dip);
+
                        xfs_trans_inode_buf(tp, ibp);
                        xfs_trans_log_buf(tp, ibp, offset,
                                          (offset + sizeof(xfs_agino_t) - 1));
@@ -1809,6 +1821,10 @@ xfs_iunlink_remove(
                last_dip->di_next_unlinked = cpu_to_be32(next_agino);
                ASSERT(next_agino != 0);
                offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
+
+               /* need to recalc the inode CRC if appropriate */
+               xfs_dinode_calc_crc(mp, last_dip);
+
                xfs_trans_inode_buf(tp, last_ibp);
                xfs_trans_log_buf(tp, last_ibp, offset,
                                  (offset + sizeof(xfs_agino_t) - 1));
index d82efaa2ac7350553c8804c014c5f299809af178..ca9ecaa81112fac7706c4cac23c92f2326f0bba8 100644 (file)
@@ -455,6 +455,28 @@ xfs_vn_getattr(
        return 0;
 }
 
+static void
+xfs_setattr_mode(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       struct iattr            *iattr)
+{
+       struct inode    *inode = VFS_I(ip);
+       umode_t         mode = iattr->ia_mode;
+
+       ASSERT(tp);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+       if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+               mode &= ~S_ISGID;
+
+       ip->i_d.di_mode &= S_IFMT;
+       ip->i_d.di_mode |= mode & ~S_IFMT;
+
+       inode->i_mode &= S_IFMT;
+       inode->i_mode |= mode & ~S_IFMT;
+}
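
The new helper only rewrites the permission bits: the S_IFMT type bits are kept, the rest of the mode is replaced, and S_ISGID is dropped when the caller is neither in the file's group nor holds CAP_FSETID. A small standalone model of that bit manipulation (the mode values are just examples):

#include <stdio.h>
#include <sys/stat.h>

/* mirror the merge above: keep the file type bits, replace the rest */
static unsigned int merge_mode(unsigned int di_mode, unsigned int new_mode,
			       int in_group, int has_fsetid)
{
	if (!in_group && !has_fsetid)
		new_mode &= ~S_ISGID;

	di_mode &= S_IFMT;
	di_mode |= new_mode & ~S_IFMT;
	return di_mode;
}

int main(void)
{
	/* regular file, mode 2775 requested by someone outside the group */
	unsigned int mode = merge_mode(S_IFREG | 0644, 02775, 0, 0);

	printf("resulting mode: %o\n", mode);	/* setgid bit stripped */
	return 0;
}
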
+
 int
 xfs_setattr_nonsize(
        struct xfs_inode        *ip,
@@ -606,18 +628,8 @@ xfs_setattr_nonsize(
        /*
         * Change file access modes.
         */
-       if (mask & ATTR_MODE) {
-               umode_t mode = iattr->ia_mode;
-
-               if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
-                       mode &= ~S_ISGID;
-
-               ip->i_d.di_mode &= S_IFMT;
-               ip->i_d.di_mode |= mode & ~S_IFMT;
-
-               inode->i_mode &= S_IFMT;
-               inode->i_mode |= mode & ~S_IFMT;
-       }
+       if (mask & ATTR_MODE)
+               xfs_setattr_mode(tp, ip, iattr);
 
        /*
         * Change file access or modified times.
@@ -714,9 +726,8 @@ xfs_setattr_size(
                return XFS_ERROR(error);
 
        ASSERT(S_ISREG(ip->i_d.di_mode));
-       ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
-                       ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
-                       ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+       ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+                       ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
        if (!(flags & XFS_ATTR_NOLOCK)) {
                lock_flags |= XFS_IOLOCK_EXCL;
@@ -860,6 +871,12 @@ xfs_setattr_size(
                xfs_inode_clear_eofblocks_tag(ip);
        }
 
+       /*
+        * Change file access modes.
+        */
+       if (mask & ATTR_MODE)
+               xfs_setattr_mode(tp, ip, iattr);
+
        if (mask & ATTR_CTIME) {
                inode->i_ctime = iattr->ia_ctime;
                ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
index e3d0b85d852b6e2c3b07bc07482d2a7e357ccf90..d0833b54e55d63ed83eb61cedfd110cd4a1263b2 100644 (file)
@@ -139,7 +139,7 @@ xlog_cil_prepare_log_vecs(
 
                new_lv = kmem_zalloc(sizeof(*new_lv) +
                                niovecs * sizeof(struct xfs_log_iovec),
-                               KM_SLEEP);
+                               KM_SLEEP|KM_NOFS);
 
                /* The allocated iovec region lies beyond the log vector. */
                new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
index 93f03ec17eeca5052a99a713895a1771887e55ac..7cf5e4eafe28b1a05890d63e5c1d706d964eef19 100644 (file)
@@ -1599,10 +1599,43 @@ xlog_recover_add_to_trans(
 }
 
 /*
- * Sort the log items in the transaction. Cancelled buffers need
- * to be put first so they are processed before any items that might
- * modify the buffers. If they are cancelled, then the modifications
- * don't need to be replayed.
+ * Sort the log items in the transaction.
+ *
+ * The ordering constraints are defined by the inode allocation and unlink
+ * behaviour. The rules are:
+ *
+ *     1. Every item is only logged once in a given transaction. Hence it
+ *        represents the last logged state of the item. Hence ordering is
+ *        dependent on the order in which operations need to be performed so
+ *        required initial conditions are always met.
+ *
+ *     2. Cancelled buffers are recorded in pass 1 in a separate table and
+ *        there's nothing to replay from them so we can simply cull them
+ *        from the transaction. However, we can't do that until after we've
+ *        replayed all the other items because they may be dependent on the
+ *        cancelled buffer and replaying the cancelled buffer can remove it
+ *        from the cancelled buffer table. Hence they have to be done last.
+ *
+ *     3. Inode allocation buffers must be replayed before inode items that
+ *        read the buffer and replay changes into it.
+ *
+ *     4. Inode unlink buffers must be replayed after inode items are replayed.
+ *        This ensures that inodes are completely flushed to the inode buffer
+ *        in a "free" state before we remove the unlinked inode list pointer.
+ *
+ * Hence the ordering needs to be inode allocation buffers first, inode items
+ * second, inode unlink buffers third and cancelled buffers last.
+ *
+ * But there's a problem with that - we can't tell an inode allocation buffer
+ * apart from a regular buffer, so we can't separate them. We can, however,
+ * tell an inode unlink buffer from the others, and so we can separate them out
+ * from all the other buffers and move them to last.
+ *
+ * Hence, 4 lists, in order from head to tail:
+ *     - buffer_list for all buffers except cancelled/inode unlink buffers
+ *     - item_list for all non-buffer items
+ *     - inode_buffer_list for inode unlink buffers
+ *     - cancel_list for the cancelled buffers
  */
 STATIC int
 xlog_recover_reorder_trans(
@@ -1612,6 +1645,10 @@ xlog_recover_reorder_trans(
 {
        xlog_recover_item_t     *item, *n;
        LIST_HEAD(sort_list);
+       LIST_HEAD(cancel_list);
+       LIST_HEAD(buffer_list);
+       LIST_HEAD(inode_buffer_list);
+       LIST_HEAD(inode_list);
 
        list_splice_init(&trans->r_itemq, &sort_list);
        list_for_each_entry_safe(item, n, &sort_list, ri_list) {
@@ -1619,12 +1656,18 @@ xlog_recover_reorder_trans(
 
                switch (ITEM_TYPE(item)) {
                case XFS_LI_BUF:
-                       if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
+                       if (buf_f->blf_flags & XFS_BLF_CANCEL) {
                                trace_xfs_log_recover_item_reorder_head(log,
                                                        trans, item, pass);
-                               list_move(&item->ri_list, &trans->r_itemq);
+                               list_move(&item->ri_list, &cancel_list);
+                               break;
+                       }
+                       if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+                               list_move(&item->ri_list, &inode_buffer_list);
                                break;
                        }
+                       list_move_tail(&item->ri_list, &buffer_list);
+                       break;
                case XFS_LI_INODE:
                case XFS_LI_DQUOT:
                case XFS_LI_QUOTAOFF:
@@ -1632,7 +1675,7 @@ xlog_recover_reorder_trans(
                case XFS_LI_EFI:
                        trace_xfs_log_recover_item_reorder_tail(log,
                                                        trans, item, pass);
-                       list_move_tail(&item->ri_list, &trans->r_itemq);
+                       list_move_tail(&item->ri_list, &inode_list);
                        break;
                default:
                        xfs_warn(log->l_mp,
@@ -1643,6 +1686,14 @@ xlog_recover_reorder_trans(
                }
        }
        ASSERT(list_empty(&sort_list));
+       if (!list_empty(&buffer_list))
+               list_splice(&buffer_list, &trans->r_itemq);
+       if (!list_empty(&inode_list))
+               list_splice_tail(&inode_list, &trans->r_itemq);
+       if (!list_empty(&inode_buffer_list))
+               list_splice_tail(&inode_buffer_list, &trans->r_itemq);
+       if (!list_empty(&cancel_list))
+               list_splice_tail(&cancel_list, &trans->r_itemq);
        return 0;
 }
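
In other words, items are binned into four lists and spliced back onto r_itemq in a fixed replay order: plain buffers, then non-buffer items, then inode unlink buffers, then cancelled buffers. A toy standalone model of that ordering (item names invented for illustration):

#include <stdio.h>

/* toy classes standing in for the log item categories sorted above */
enum kind { PLAIN_BUF, INODE_ITEM, INODE_UNLINK_BUF, CANCELLED_BUF };

static const char *names[] = {
	"buffer", "inode/other item", "inode unlink buffer", "cancelled buffer"
};

int main(void)
{
	/* arrival order of items in the recovered transaction */
	enum kind items[] = { CANCELLED_BUF, INODE_ITEM, PLAIN_BUF,
			      INODE_UNLINK_BUF, PLAIN_BUF, INODE_ITEM };
	int n = sizeof(items) / sizeof(items[0]);
	/* replay order: buffers, items, unlink buffers, cancelled buffers */
	enum kind order[] = { PLAIN_BUF, INODE_ITEM, INODE_UNLINK_BUF,
			      CANCELLED_BUF };
	int i, j;

	for (j = 0; j < 4; j++)
		for (i = 0; i < n; i++)
			if (items[i] == order[j])
				printf("replay %s\n", names[items[i]]);
	return 0;
}
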
 
@@ -1794,7 +1845,13 @@ xlog_recover_do_inode_buffer(
        xfs_agino_t             *buffer_nextp;
 
        trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
-       bp->b_ops = &xfs_inode_buf_ops;
+
+       /*
+        * Post recovery validation only works properly on CRC enabled
+        * filesystems.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               bp->b_ops = &xfs_inode_buf_ops;
 
        inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
        for (i = 0; i < inodes_per_buf; i++) {
@@ -1861,6 +1918,15 @@ xlog_recover_do_inode_buffer(
                buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
                                              next_unlinked_offset);
                *buffer_nextp = *logged_nextp;
+
+               /*
+                * If necessary, recalculate the CRC in the on-disk inode. We
+                * have to leave the inode in a consistent state for whoever
+                * reads it next....
+                */
+               xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
+                               xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+
        }
 
        return 0;
@@ -2096,6 +2162,17 @@ xlog_recover_do_reg_buffer(
                ASSERT(BBTOB(bp->b_io_length) >=
                       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
 
+               /*
+                * The dirty regions logged in the buffer, even though
+                * contiguous, may span multiple chunks. This is because the
+                * dirty region may span a physical page boundary in a buffer
+                * and hence be split into two separate vectors for writing into
+                * the log. Hence we need to trim nbits back to the length of
+                * the current region being copied out of the log.
+                */
+               if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
+                       nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
+
                /*
                 * Do a sanity check if this is a dquot buffer. Just checking
                 * the first dquot in the buffer should do. XXXThis is
@@ -2134,7 +2211,16 @@ xlog_recover_do_reg_buffer(
        /* Shouldn't be any more regions */
        ASSERT(i == item->ri_total);
 
-       xlog_recovery_validate_buf_type(mp, bp, buf_f);
+       /*
+        * We can only do post recovery validation on items on CRC enabled
+        * fielsystems as we need to know when the buffer was written to be able
+        * to determine if we should have replayed the item. If we replay old
+        * metadata over a newer buffer, then it will enter a temporarily
+        * inconsistent state resulting in verification failures. Hence for now
+        * just avoid the verification stage for non-crc filesystems
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               xlog_recovery_validate_buf_type(mp, bp, buf_f);
 }
 
 /*
@@ -2255,6 +2341,12 @@ xfs_qm_dqcheck(
        d->dd_diskdq.d_flags = type;
        d->dd_diskdq.d_id = cpu_to_be32(id);
 
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
+
        return errs;
 }
 
@@ -2782,6 +2874,10 @@ xlog_recover_dquot_pass2(
        }
 
        memcpy(ddq, recddq, item->ri_buf[1].i_len);
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
 
        ASSERT(dq_f->qlf_size == 2);
        ASSERT(bp->b_target->bt_mount == mp);
index f6bfbd73466981042e21f0a5fe150450701e57dd..e8e310c050977c51ecf1927c177c17cc0b17ef01 100644 (file)
@@ -314,7 +314,8 @@ STATIC int
 xfs_mount_validate_sb(
        xfs_mount_t     *mp,
        xfs_sb_t        *sbp,
-       bool            check_inprogress)
+       bool            check_inprogress,
+       bool            check_version)
 {
 
        /*
@@ -337,9 +338,10 @@ xfs_mount_validate_sb(
 
        /*
         * Version 5 superblock feature mask validation. Reject combinations the
-        * kernel cannot support up front before checking anything else.
+        * kernel cannot support up front before checking anything else. For
+        * write validation, we don't need to check feature masks.
         */
-       if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
+       if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
                xfs_alert(mp,
 "Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
 "Use of these features in this kernel is at your own risk!");
@@ -675,7 +677,8 @@ xfs_sb_to_disk(
 
 static int
 xfs_sb_verify(
-       struct xfs_buf  *bp)
+       struct xfs_buf  *bp,
+       bool            check_version)
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
        struct xfs_sb   sb;
@@ -686,7 +689,8 @@ xfs_sb_verify(
         * Only check the in progress field for the primary superblock as
         * mkfs.xfs doesn't clear it from secondary superblocks.
         */
-       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
+       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+                                    check_version);
 }
 
 /*
@@ -719,7 +723,7 @@ xfs_sb_read_verify(
                        goto out_error;
                }
        }
-       error = xfs_sb_verify(bp);
+       error = xfs_sb_verify(bp, true);
 
 out_error:
        if (error) {
@@ -758,7 +762,7 @@ xfs_sb_write_verify(
        struct xfs_buf_log_item *bip = bp->b_fspriv;
        int                     error;
 
-       error = xfs_sb_verify(bp);
+       error = xfs_sb_verify(bp, false);
        if (error) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
                xfs_buf_ioerror(bp, error);
index f41702b43003d3930ea3e281da363e495b3edd6b..b75c9bb6e71e34b0c65158f63ba475a217d9e347 100644 (file)
@@ -41,6 +41,7 @@
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_cksum.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -839,7 +840,7 @@ xfs_qm_reset_dqcounts(
        xfs_dqid_t      id,
        uint            type)
 {
-       xfs_disk_dquot_t        *ddq;
+       struct xfs_dqblk        *dqb;
        int                     j;
 
        trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -853,8 +854,12 @@ xfs_qm_reset_dqcounts(
        do_div(j, sizeof(xfs_dqblk_t));
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 #endif
-       ddq = bp->b_addr;
+       dqb = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
+               struct xfs_disk_dquot   *ddq;
+
+               ddq = (struct xfs_disk_dquot *)&dqb[j];
+
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
@@ -871,7 +876,12 @@ xfs_qm_reset_dqcounts(
                ddq->d_bwarns = 0;
                ddq->d_iwarns = 0;
                ddq->d_rtbwarns = 0;
-               ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
+
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+                       xfs_update_cksum((char *)&dqb[j],
+                                        sizeof(struct xfs_dqblk),
+                                        XFS_DQUOT_CRC_OFF);
+               }
        }
 }
 
@@ -907,19 +917,29 @@ xfs_qm_dqiter_bufs(
                              XFS_FSB_TO_DADDR(mp, bno),
                              mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                              &xfs_dquot_buf_ops);
-               if (error)
-                       break;
 
                /*
-                * XXX(hch): need to figure out if it makes sense to validate
-                *           the CRC here.
+                * CRC and validation errors will return EFSCORRUPTED here. If
+                * this occurs, re-read without CRC validation so that we can
+                * repair the damage via xfs_qm_reset_dqcounts(). This process
+                * will leave a trace in the log indicating corruption has
+                * been detected.
                 */
+               if (error == EFSCORRUPTED) {
+                       error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+                                     XFS_FSB_TO_DADDR(mp, bno),
+                                     mp->m_quotainfo->qi_dqchunklen, 0, &bp,
+                                     NULL);
+               }
+
+               if (error)
+                       break;
+
                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);
-               /*
-                * goto the next block.
-                */
+
+               /* goto the next block. */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }
index c41190cad6e91a78306122a4ef18511d3f3b8f01..6cdf6ffc36a1d7f9fc967ec6c72de3621a58b47c 100644 (file)
@@ -489,31 +489,36 @@ xfs_qm_scall_setqlim(
        if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
                return 0;
 
-       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
-       error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return (error);
-       }
-
        /*
         * We don't want to race with a quotaoff so take the quotaoff lock.
-        * (We don't hold an inode lock, so there's nothing else to stop
-        * a quotaoff from happening). (XXXThis doesn't currently happen
-        * because we take the vfslock before calling xfs_qm_sysent).
+        * We don't hold an inode lock, so there's nothing else to stop
+        * a quotaoff from happening.
         */
        mutex_lock(&q->qi_quotaofflock);
 
        /*
-        * Get the dquot (locked), and join it to the transaction.
-        * Allocate the dquot if this doesn't exist.
+        * Get the dquot (locked) before we start, as we need to do a
+        * transaction to allocate it if it doesn't exist. Once we have the
+        * dquot, unlock it so we can start the next transaction safely. We hold
+        * a reference to the dquot, so it's safe to do this unlock/lock without
+        * it being reclaimed in the mean time.
         */
-       if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
-               xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+       error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
+       if (error) {
                ASSERT(error != ENOENT);
                goto out_unlock;
        }
+       xfs_dqunlock(dqp);
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
+       error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
+                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_rele;
+       }
+
+       xfs_dqlock(dqp);
        xfs_trans_dqjoin(tp, dqp);
        ddq = &dqp->q_core;
 
@@ -621,9 +626,10 @@ xfs_qm_scall_setqlim(
        xfs_trans_log_dquot(tp, dqp);
 
        error = xfs_trans_commit(tp, 0);
-       xfs_qm_dqrele(dqp);
 
- out_unlock:
+out_rele:
+       xfs_qm_dqrele(dqp);
+out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
 }
index c61e31c7d99791271848205b031ddc86af2142b8..c38068f26c558d2b1f6c81ac902c4c53498cafbc 100644 (file)
@@ -87,6 +87,8 @@ typedef struct xfs_dqblk {
        uuid_t            dd_uuid;      /* location information */
 } xfs_dqblk_t;
 
+#define XFS_DQUOT_CRC_OFF      offsetof(struct xfs_dqblk, dd_crc)
+
 /*
  * flags for q_flags field in the dquot.
  */
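
XFS_DQUOT_CRC_OFF is the byte offset of dd_crc within struct xfs_dqblk, and it is what the dquot flush and verify paths above hand to xfs_update_cksum()/xfs_verify_cksum(). The standalone sketch below only models the general shape of such an offset-based checksum update; the toy additive checksum stands in for the CRC32c the kernel actually uses:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* toy on-disk block with an embedded checksum field */
struct blk {
	uint32_t	magic;
	uint32_t	crc;
	char		payload[24];
};

#define BLK_CRC_OFF	offsetof(struct blk, crc)

/* toy additive checksum; the kernel uses CRC32c here */
static uint32_t cksum(const char *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum = sum * 31 + (unsigned char)*p++;
	return sum;
}

/* checksum the block with the crc field itself zeroed, then store it */
static void update_cksum(char *buf, size_t len, size_t crc_off)
{
	uint32_t crc;

	memset(buf + crc_off, 0, sizeof(uint32_t));
	crc = cksum(buf, len);
	memcpy(buf + crc_off, &crc, sizeof(crc));
}

int main(void)
{
	struct blk b = { .magic = 0x51554f54 };	/* toy magic value */

	strcpy(b.payload, "quota counters");
	update_cksum((char *)&b, sizeof(b), BLK_CRC_OFF);
	printf("stored crc 0x%08x at offset %zu\n",
	       (unsigned)b.crc, (size_t)BLK_CRC_OFF);
	return 0;
}
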
index ea341cea68cbfc5a7798810f0239ff34bfedfb09..3033ba5e9762f19609a7f86d5117f64eb70f7bd4 100644 (file)
@@ -1372,6 +1372,17 @@ xfs_finish_flags(
                }
        }
 
+       /*
+        * V5 filesystems always use attr2 format for attributes.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb) &&
+           (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+               xfs_warn(mp,
+"Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.",
+                       MNTOPT_NOATTR2, MNTOPT_ATTR2);
+               return XFS_ERROR(EINVAL);
+       }
+
        /*
         * mkfs'ed attr2 will turn on attr2 mount unless explicitly
         * told by noattr2 to turn it off
index 5f234389327c806c286c7c1b8a2b0578a4aa8a9f..195a403e1522bbed00cb0f843b1d53c885db0e0d 100644 (file)
@@ -56,16 +56,9 @@ xfs_symlink_blocks(
        struct xfs_mount *mp,
        int             pathlen)
 {
-       int             fsblocks = 0;
-       int             len = pathlen;
+       int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
 
-       do {
-               fsblocks++;
-               len -= XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
-       } while (len > 0);
-
-       ASSERT(fsblocks <= XFS_SYMLINK_MAPS);
-       return fsblocks;
+       return (pathlen + buflen - 1) / buflen;
 }
 
 static int
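
The rewritten xfs_symlink_blocks() replaces the per-block counting loop with a single ceiling division over the usable space per block. The two forms agree for any positive pathlen; a standalone check, with an assumed 4040-byte payload standing in for XFS_SYMLINK_BUF_SPACE(mp, blocksize):

#include <stdio.h>

#define BUF_SPACE	4040	/* assumed per-block payload space */

/* old form: walk the path one block's worth at a time */
static int blocks_loop(int pathlen)
{
	int fsblocks = 0;
	int len = pathlen;

	do {
		fsblocks++;
		len -= BUF_SPACE;
	} while (len > 0);
	return fsblocks;
}

/* new form: a single ceiling division */
static int blocks_div(int pathlen)
{
	return (pathlen + BUF_SPACE - 1) / BUF_SPACE;
}

int main(void)
{
	int len;

	for (len = 1; len <= 3 * BUF_SPACE + 1; len++)
		if (blocks_loop(len) != blocks_div(len))
			printf("mismatch at %d\n", len);
	printf("loop(4041)=%d, div(4041)=%d\n",
	       blocks_loop(4041), blocks_div(4041));
	return 0;
}
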
@@ -405,7 +398,7 @@ xfs_symlink(
        if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
                fs_blocks = 0;
        else
-               fs_blocks = XFS_B_TO_FSB(mp, pathlen);
+               fs_blocks = xfs_symlink_blocks(mp, pathlen);
        resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
        error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
                        XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
@@ -512,7 +505,7 @@ xfs_symlink(
                cur_chunk = target_path;
                offset = 0;
                for (n = 0; n < nmaps; n++) {
-                       char *buf;
+                       char    *buf;
 
                        d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
                        byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
@@ -525,9 +518,7 @@ xfs_symlink(
                        bp->b_ops = &xfs_symlink_buf_ops;
 
                        byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
-                       if (pathlen < byte_cnt) {
-                               byte_cnt = pathlen;
-                       }
+                       byte_cnt = min(byte_cnt, pathlen);
 
                        buf = bp->b_addr;
                        buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
@@ -542,6 +533,7 @@ xfs_symlink(
                        xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
                                                        (char *)bp->b_addr);
                }
+               ASSERT(pathlen == 0);
        }
 
        /*
index 1501f4fa51a6e4a0a683ca9998ded29a0410e450..0176bb21f09a5c3c0795f6f6932e07f92643c94a 100644 (file)
@@ -1453,7 +1453,7 @@ xfs_free_file_space(
        xfs_mount_t             *mp;
        int                     nimap;
        uint                    resblks;
-       uint                    rounding;
+       xfs_off_t               rounding;
        int                     rt;
        xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;
@@ -1482,7 +1482,7 @@ xfs_free_file_space(
                inode_dio_wait(VFS_I(ip));
        }
 
-       rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+       rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
        ioffset = offset & ~(rounding - 1);
        error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                              ioffset, -1);
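
Widening rounding from uint to xfs_off_t matters because of the mask in ioffset = offset & ~(rounding - 1): with a 32-bit unsigned rounding, the complement is only 32 bits wide, so the AND clears the upper half of any offset beyond 4GiB. A standalone demonstration (4096 is just an example rounding value):

#include <stdio.h>

typedef long long xfs_off_t;

int main(void)
{
	xfs_off_t	offset = (6LL << 30) + 12345;	/* an offset past 4GiB */
	unsigned int	r32 = 4096;			/* old: uint rounding */
	xfs_off_t	r64 = 4096;			/* new: xfs_off_t rounding */

	/*
	 * With a 32-bit rounding, ~(r32 - 1) is a 32-bit mask, so the
	 * high half of the 64-bit offset is thrown away by the AND.
	 */
	printf("uint mask:      %lld\n", offset & ~(r32 - 1));
	printf("xfs_off_t mask: %lld\n", offset & ~(r64 - 1));
	return 0;
}
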
index 98db31d9f9b410908b2056b192bf9813bc8ce4aa..636c59f2003a315c264777eccd0e3abfbdd8045f 100644 (file)
@@ -377,7 +377,6 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
 
-#ifdef CONFIG_PM
 int acpi_bus_set_power(acpi_handle handle, int state);
 const char *acpi_power_state_string(int state);
 int acpi_device_get_power(struct acpi_device *device, int *state);
@@ -385,41 +384,12 @@ int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
+
+#ifdef CONFIG_PM
 bool acpi_bus_can_wakeup(acpi_handle handle);
-#else /* !CONFIG_PM */
-static inline int acpi_bus_set_power(acpi_handle handle, int state)
-{
-       return 0;
-}
-static inline const char *acpi_power_state_string(int state)
-{
-       return "D0";
-}
-static inline int acpi_device_get_power(struct acpi_device *device, int *state)
-{
-       return 0;
-}
-static inline int acpi_device_set_power(struct acpi_device *device, int state)
-{
-       return 0;
-}
-static inline int acpi_bus_init_power(struct acpi_device *device)
-{
-       return 0;
-}
-static inline int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
-       return 0;
-}
-static inline bool acpi_bus_power_manageable(acpi_handle handle)
-{
-       return false;
-}
-static inline bool acpi_bus_can_wakeup(acpi_handle handle)
-{
-       return false;
-}
-#endif /* !CONFIG_PM */
+#else
+static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
+#endif
 
 #ifdef CONFIG_ACPI_PROC_EVENT
 int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
index 5b3d2bd4813ae30895160d1a383ea149615d292e..64b8c7639520d1e41e1439bc558d7a35424e25ef 100644 (file)
@@ -77,7 +77,7 @@ struct acpi_signal_fatal_info {
 /*
  * OSL Initialization and shutdown primitives
  */
-acpi_status __initdata acpi_os_initialize(void);
+acpi_status __init acpi_os_initialize(void);
 
 acpi_status acpi_os_terminate(void);
 
index b327b5a9296d36fb6c31fd26e382488fd0b45cbb..ea69367fdd3bbafaf1775da248c7cc998c47fba8 100644 (file)
@@ -329,10 +329,16 @@ int acpi_processor_power_init(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_suspend(struct device *dev);
-int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;
 
+#ifdef CONFIG_PM_SLEEP
+void acpi_processor_syscore_init(void);
+void acpi_processor_syscore_exit(void);
+#else
+static inline void acpi_processor_syscore_init(void) {}
+static inline void acpi_processor_syscore_exit(void) {}
+#endif
+
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
index ac9da00e9f2c66b104029aa7b89253802708a322..d5afe96adba6c5ee239390625bbb93907c2cc1ba 100644 (file)
@@ -343,8 +343,12 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
 #endif /* CONFIG_HAS_IOPORT */
 
+#ifndef xlate_dev_kmem_ptr
 #define xlate_dev_kmem_ptr(p)  p
+#endif
+#ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr(p)   __va(p)
+#endif
 
 #ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
index 9d96605f160a00315064d95a8ed1ac242b0c4775..fa25becbdcaf406faddcf9b00cad76fbfa343b72 100644 (file)
@@ -18,4 +18,9 @@ static inline unsigned int kvm_arch_para_features(void)
        return 0;
 }
 
+static inline bool kvm_para_available(void)
+{
+       return false;
+}
+
 #endif
index b1b1fa6ffffecce461a44945524d60d9b46dd54c..13821c339a4151912b88f2e4de04cbf99653a612 100644 (file)
@@ -97,11 +97,9 @@ struct mmu_gather {
        unsigned long           start;
        unsigned long           end;
        unsigned int            need_flush : 1, /* Did free PTEs */
-                               fast_mode  : 1; /* No batching   */
-
        /* we are in the middle of an operation to clear
         * a full mm and can make some optimizations */
-       unsigned int            fullmm : 1,
+                               fullmm : 1,
        /* we have performed an operation which
         * requires a complete flush of the tlb */
                                need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
-       return tlb->fast_mode;
-#else
-       /*
-        * For UP we don't need to worry about TLB flush
-        * and page free order so much..
-        */
-       return 1;
-#endif
-}
-
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
index 61196592152e09a3913d1989cd847a8f9457664b..63d17ee9eb488c336ad55521f346dc1c2703c4c4 100644 (file)
@@ -316,6 +316,7 @@ struct drm_ioctl_desc {
        int flags;
        drm_ioctl_t *func;
        unsigned int cmd_drv;
+       const char *name;
 };
 
 /**
@@ -324,7 +325,7 @@ struct drm_ioctl_desc {
  */
 
 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)                        \
-       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
+       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
 
 struct drm_magic_entry {
        struct list_head head;
index 8230b46fdd73ff916324a9d4d0dd064a13207502..471f276ce8f741638ad6ced787aa7f943457e751 100644 (file)
@@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size {
 
 /**
  * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
- * @gamma_set: - Set the given gamma lut register on the given crtc.
- * @gamma_get: - Read the given gamma lut register on the given crtc, used to
- *              save the current lut when force-restoring the fbdev for e.g.
- *              kdbg.
- * @fb_probe: - Driver callback to allocate and initialize the fbdev info
- *             structure. Futhermore it also needs to allocate the drm
- *             framebuffer used to back the fbdev.
+ * @gamma_set: Set the given gamma lut register on the given crtc.
+ * @gamma_get: Read the given gamma lut register on the given crtc, used to
+ *             save the current lut when force-restoring the fbdev for e.g.
+ *             kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+ *            structure. Furthermore it also needs to allocate the drm
+ *            framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
  *
  * Driver callbacks used by the fbdev emulation helper library.
  */
index 393369147a2dd7812ea5555f30ac8a4390ed1442..675ddf4b441f4746942dd62b2ae5febdaaad2868 100644 (file)
@@ -87,15 +87,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
 /** Other copying of data from kernel space */
 #define DRM_COPY_TO_USER(arg1, arg2, arg3)             \
        copy_to_user(arg1, arg2, arg3)
-/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size )             \
-       (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
-       __copy_from_user(arg1, arg2, arg3)
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)   \
-       __copy_to_user(arg1, arg2, arg3)
-#define DRM_GET_USER_UNCHECKED(val, uaddr)             \
-       __get_user(val, uaddr)
 
 #define DRM_HZ HZ
 
index c2af598f701dac425a6b4b7d65bfec06529801cb..bb1bc485390b253826e683de25dcf8f8c5d72f4e 100644 (file)
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
index d09deabc7bf64773eff4223d631830dc74ef45b3..fb0298082916fa7f7622de3fc2233ca4732b649e 100644 (file)
@@ -37,6 +37,8 @@ struct acpi_dma_spec {
  * @dev:               struct device of this controller
  * @acpi_dma_xlate:    callback function to find a suitable channel
  * @data:              private data used by a callback function
+ * @base_request_line: first supported request line (CSRT)
+ * @end_request_line:  last supported request line (CSRT)
  */
 struct acpi_dma {
        struct list_head        dma_controllers;
@@ -44,6 +46,8 @@ struct acpi_dma {
        struct dma_chan         *(*acpi_dma_xlate)
                                (struct acpi_dma_spec *, struct acpi_dma *);
        void                    *data;
+       unsigned short          base_request_line;
+       unsigned short          end_request_line;
 };
 
 /* Used with acpi_dma_simple_xlate() */
index ec10e1b24c1cce50d50581d58c5000cf7b9e5565..737f90ab4b6235abd340b1c47d9308591c138e8b 100644 (file)
@@ -49,10 +49,11 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 }
 #endif
 
-extern void cper_print_aer(const char *prefix, struct pci_dev *dev,
+extern void cper_print_aer(struct pci_dev *dev,
                           int cper_severity, struct aer_capability_regs *aer);
 extern int cper_severity_to_aer(int cper_severity);
 extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
-                             int severity);
+                             int severity,
+                             struct aer_capability_regs *aer_regs);
 #endif //_AER_H_
 
index b840a496028259076caff141de3a6a92ed177a3a..677b4f01b2d0bfd2dd1619d930ec92992bcd1d7d 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef _LINUX_BRCMPHY_H
+#define _LINUX_BRCMPHY_H
+
 #define PHY_ID_BCM50610                        0x0143bd60
 #define PHY_ID_BCM50610M               0x0143bd70
 #define PHY_ID_BCM5241                 0x0143bc30
@@ -29,3 +32,5 @@
 #define PHY_BRCM_CLEAR_RGMII_MODE      0x00004000
 #define PHY_BRCM_DIS_TXCRXC_NOENRGY    0x00008000
 #define PHY_BCM_FLAGS_VALID            0x80000000
+
+#endif /* _LINUX_BRCMPHY_H */
index 5047355b9a0fcf4e2b07076b414c255d2c9b7510..8bda1294c035b24912a3da178ebc2c8e1cd5f827 100644 (file)
@@ -707,7 +707,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  *
  * If a subsystem synchronizes against the parent in its ->css_online() and
  * before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_offline() is
+ * iteration, any descendant cgroup which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
  *
  * In other words, the following guarantees that a descendant can't escape
index c6f6e0839b618611723507d4a5dcab33648a9d02..9f3c7e81270ad11c18fdbfdec42416058560af7f 100644 (file)
@@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
 
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
+extern void cpu_hotplug_disable(void);
+extern void cpu_hotplug_enable(void);
 #define hotcpu_notifier(fn, pri)       cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)   register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
 
 #define get_online_cpus()      do { } while (0)
 #define put_online_cpus()      do { } while (0)
+#define cpu_hotplug_disable()  do { } while (0)
+#define cpu_hotplug_enable()   do { } while (0)
 #define hotcpu_notifier(fn, pri)       do { (void)(fn); } while (0)
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotcpu_notifier(nb)   ({ (void)(nb); 0; })
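
A usage sketch for the new pair (not part of the diff; the firmware wrapper is hypothetical): unlike get_online_cpus(), which waits out in-flight hotplug operations, cpu_hotplug_disable() is intended to make hotplug attempts fail until cpu_hotplug_enable() is called.

	static int foo_run_firmware_sequence(void)
	{
		int ret;

		cpu_hotplug_disable();		/* reject CPU hotplug while the sequence runs */
		ret = foo_do_firmware_calls();	/* hypothetical helper */
		cpu_hotplug_enable();
		return ret;
	}
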
index c050dcc322a43e2264bdaf49b3667a0b2e63c6ab..a6ac84871d6d415eb0e671b45daeddacbc7ad4a3 100644 (file)
@@ -46,6 +46,7 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
 extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
 
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
@@ -58,10 +59,10 @@ extern void bpf_jit_free(struct sk_filter *fp);
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
 {
-       pr_err("flen=%u proglen=%u pass=%u image=%p\n",
+       pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
               flen, proglen, pass, image);
        if (image)
-               print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
+               print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
 }
 #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
index c3f817c3eb451091e5b02fc6c8f22e7af5d1126a..a86784dec3d34fcafb7e5d1b377a9d73b2d7717e 100644 (file)
@@ -12,5 +12,6 @@ struct ifla_vf_info {
        __u32 qos;
        __u32 tx_rate;
        __u32 spoofchk;
+       __u32 linkstate;
 };
 #endif /* _LINUX_IF_LINK_H */
index 84dde1dd1da40a19477141016a6202220d46d63f..ddd33fd5904dbb32d428c449543ecc6e3dc8a010 100644 (file)
@@ -8,7 +8,7 @@
 #include <net/netlink.h>
 #include <linux/u64_stats_sync.h>
 
-#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE)
+#if IS_ENABLED(CONFIG_MACVTAP)
 struct socket *macvtap_get_socket(struct file *);
 #else
 #include <linux/err.h>
@@ -50,7 +50,7 @@ struct macvlan_pcpu_stats {
  * Maximum times a macvtap device can be opened. This can be used to
  * configure the number of receive queue, e.g. for multiqueue virtio.
  */
-#define MAX_MACVTAP_QUEUES     (NR_CPUS < 16 ? NR_CPUS : 16)
+#define MAX_MACVTAP_QUEUES     16
 
 #define MACVLAN_MC_FILTER_BITS 8
 #define MACVLAN_MC_FILTER_SZ   (1 << MACVLAN_MC_FILTER_BITS)
@@ -65,12 +65,18 @@ struct macvlan_dev {
 
        DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
 
+       netdev_features_t       set_features;
        enum macvlan_mode       mode;
        u16                     flags;
        int (*receive)(struct sk_buff *skb);
        int (*forward)(struct net_device *dev, struct sk_buff *skb);
-       struct macvtap_queue    *taps[MAX_MACVTAP_QUEUES];
+       /* This array tracks active taps. */
+       struct macvtap_queue    __rcu *taps[MAX_MACVTAP_QUEUES];
+       /* This list tracks all taps (both enabled and disabled) */
+       struct list_head        queue_list;
        int                     numvtaps;
+       int                     numqueues;
+       netdev_features_t       tap_features;
        int                     minor;
 };
 
index 4474557904f69eef849b329b41b91ba4f72bb678..f6156f91eb1cb0fb06597e83b95a8bf74664cee6 100644 (file)
@@ -69,6 +69,7 @@ struct team_port {
        s32 priority; /* lower number ~ higher priority */
        u16 queue_id;
        struct list_head qom_list; /* node in queue override mapping list */
+       struct rcu_head rcu;
        long mode_priv[0];
 };
 
@@ -228,6 +229,16 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
                        return port;
        return NULL;
 }
+
+static inline int team_num_to_port_index(struct team *team, int num)
+{
+       int en_port_count = ACCESS_ONCE(team->en_port_count);
+
+       if (unlikely(!en_port_count))
+               return 0;
+       return num % en_port_count;
+}
+
 static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
                                                           int port_index)
 {
@@ -249,12 +260,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
                return port;
        cur = port;
        list_for_each_entry_continue_rcu(cur, &team->port_list, list)
-               if (team_port_txable(port))
+               if (team_port_txable(cur))
                        return cur;
        list_for_each_entry_rcu(cur, &team->port_list, list) {
                if (cur == port)
                        break;
-               if (team_port_txable(port))
+               if (team_port_txable(cur))
                        return cur;
        }
        return NULL;
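
A sketch of how a transmit mode might use the new helper (everything prefixed foo_ is hypothetical): team_num_to_port_index() folds an arbitrary counter onto the range of currently enabled ports, and the corrected team_get_first_port_txable_rcu() then skips ports that cannot transmit right now.

	static struct team_port *foo_pick_port(struct team *team, unsigned int counter)
	{
		struct team_port *port;
		int idx = team_num_to_port_index(team, counter);

		port = team_get_port_by_index_rcu(team, idx);
		if (!port)
			return NULL;
		/* fall through to the next enabled, txable port if needed */
		return team_get_first_port_txable_rcu(team, port);
	}
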
index 52bd03b389625c4f449052d021547712a0022cbe..7a9c8cf31659f1e324a68237f1b37e0e21f3a669 100644 (file)
@@ -243,8 +243,6 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
        return skb;
 }
 
-#define HAVE_VLAN_PUT_TAG
-
 /**
  * vlan_put_tag - inserts VLAN tag according to device features
  * @skb: skbuff to tag
index 7f2bf15184802b12733489b36755fce4e9c2caf4..e3362b5f13e8258857f50a64e73fe3e70f9039f3 100644 (file)
@@ -84,6 +84,7 @@ struct ip_mc_list {
                struct ip_mc_list *next;
                struct ip_mc_list __rcu *next_rcu;
        };
+       struct ip_mc_list __rcu *next_hash;
        struct timer_list       timer;
        int                     users;
        atomic_t                refcnt;
index ea1e3b8638900a5d7f5cf46e49eec37c3274f9a9..b99cd23f347489ea0a3c2a14d58193c6e1117331 100644 (file)
@@ -50,12 +50,17 @@ struct ipv4_devconf {
        DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
 };
 
+#define MC_HASH_SZ_LOG 9
+
 struct in_device {
        struct net_device       *dev;
        atomic_t                refcnt;
        int                     dead;
        struct in_ifaddr        *ifa_list;      /* IP ifaddr chain              */
+
        struct ip_mc_list __rcu *mc_list;       /* IP multicast filter chain    */
+       struct ip_mc_list __rcu * __rcu *mc_hash;
+
        int                     mc_count;       /* Number of installed mcasts   */
        spinlock_t              mc_tomb_lock;
        struct ip_mc_list       *mc_tomb;
index 13a3da25ff0752a99bccb12b9c105cdbdae13457..98cd41bb39c8667953c10030ef32f2945765f7e8 100644 (file)
@@ -30,15 +30,19 @@ struct journal_head {
 
        /*
         * Journalling list for this buffer [jbd_lock_bh_state()]
+        * NOTE: We *cannot* combine this with b_modified into a bitfield
+        * as gcc would then (which the C standard allows but which is
+        * very unuseful) make 64-bit accesses to the bitfield and clobber
+        * b_jcount if its update races with bitfield modification.
         */
-       unsigned b_jlist:4;
+       unsigned b_jlist;
 
        /*
         * This flag signals the buffer has been modified by
         * the currently running transaction
         * [jbd_lock_bh_state()]
         */
-       unsigned b_modified:1;
+       unsigned b_modified;
 
        /*
         * Copy of the buffer data frozen for writing to the log.
index e96329ceb28c8440e470e3e073226ed24995a462..e9ef6d6b51d5b07f6471e96dc20efa5685416220 100644 (file)
@@ -562,6 +562,9 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...);
 extern __printf(2, 3)
 int __trace_printk(unsigned long ip, const char *fmt, ...);
 
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str, int size);
+
 /**
  * trace_puts - write a string into the ftrace buffer
  * @str: the string to record
@@ -587,8 +590,6 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
  */
 
-extern int __trace_bputs(unsigned long ip, const char *str);
-extern int __trace_puts(unsigned long ip, const char *str, int size);
 #define trace_puts(str) ({                                             \
        static const char *trace_printk_fmt                             \
                __attribute__((section("__trace_printk_fmt"))) =        \
index e15828fd71f1b589780b933549e3b07c9652c1e9..484604d184be7380807868d9a12edc80e9064db3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
        atomic_t refcount;
@@ -98,6 +99,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
        return kref_sub(kref, 1, release);
 }
 
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *          last reference to the object is released.
+ *          This pointer is required, and it is not acceptable to pass kfree
+ *          in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identical to kref_put with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+               void (*release)(struct kref *kref),
+               spinlock_t *lock)
+{
+       unsigned long flags;
+
+       WARN_ON(release == NULL);
+       if (atomic_add_unless(&kref->refcount, -1, 1))
+               return 0;
+       spin_lock_irqsave(lock, flags);
+       if (atomic_dec_and_test(&kref->refcount)) {
+               release(kref);
+               local_irq_restore(flags);
+               return 1;
+       }
+       spin_unlock_irqrestore(lock, flags);
+       return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
                                 void (*release)(struct kref *kref),
                                 struct mutex *lock)
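
A sketch of the calling convention (struct foo, foo_list_lock and foo_put() are hypothetical): the release callback runs with @lock held and IRQs saved, must finish with a plain spin_unlock(), and kref_put_spinlock_irqsave() then restores the saved IRQ flags itself.

	static DEFINE_SPINLOCK(foo_list_lock);

	struct foo {
		struct kref		kref;
		struct list_head	node;
	};

	static void foo_release(struct kref *kref)
	{
		struct foo *f = container_of(kref, struct foo, kref);

		list_del(&f->node);
		spin_unlock(&foo_list_lock);	/* no _irqrestore here, per the contract above */
		kfree(f);
	}

	static void foo_put(struct foo *f)
	{
		kref_put_spinlock_irqsave(&f->kref, foo_release, &foo_list_lock);
	}
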
index bbca12804d12434ebfb100487c460c2ed21f99ed..b4fa5e4cd158dfa5e8bdda0c9e14df77818abe65 100644 (file)
@@ -323,6 +323,11 @@ static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
        return ktime_add_ns(kt, usec * 1000);
 }
 
+static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
+{
+       return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
+}
+
 static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
 {
        return ktime_sub_ns(kt, usec * 1000);
@@ -366,7 +371,15 @@ extern void ktime_get_ts(struct timespec *ts);
 static inline ktime_t ns_to_ktime(u64 ns)
 {
        static const ktime_t ktime_zero = { .tv64 = 0 };
+
        return ktime_add_ns(ktime_zero, ns);
 }
 
+static inline ktime_t ms_to_ktime(u64 ms)
+{
+       static const ktime_t ktime_zero = { .tv64 = 0 };
+
+       return ktime_add_ms(ktime_zero, ms);
+}
+
 #endif
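
A sketch of the new millisecond helpers (the foo_ functions and the 100 ms / 500 ms values are arbitrary): they mirror the existing microsecond variants, so timeouts can be written directly in milliseconds.

	static void foo_arm_timer(struct hrtimer *timer)
	{
		/* absolute expiry: 100 ms from now */
		hrtimer_start(timer, ktime_add_ms(ktime_get(), 100), HRTIMER_MODE_ABS);
	}

	static u64 foo_restart_timer(struct hrtimer *timer)
	{
		/* push the timer forward by a fixed 500 ms period */
		return hrtimer_forward_now(timer, ms_to_ktime(500));
	}
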
index 6a1f8df9144bcfd5c1af3e9cbad600fe61749c29..b83e5657365adcd30bd816d7d0e57ce3d3691980 100644 (file)
@@ -361,6 +361,17 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
 
+/**
+ * list_first_entry_or_null - get the first element from a list
+ * @ptr:       the list head to take the element from.
+ * @type:      the type of the struct this is embedded in.
+ * @member:    the name of the list_struct within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_or_null(ptr, type, member) \
+       (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
 /**
  * list_for_each       -       iterate over a list
  * @pos:       the &struct list_head to use as a loop cursor.
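
A sketch of the new helper (struct foo and foo_pop() are hypothetical): it folds the usual list_empty() check and list_first_entry() call into one expression.

	struct foo {
		struct list_head	node;
		int			val;
	};

	static struct foo *foo_pop(struct list_head *head)
	{
		/* NULL when the list is empty, otherwise the first entry */
		struct foo *first = list_first_entry_or_null(head, struct foo, node);

		if (first)
			list_del(&first->node);
		return first;
	}
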
index dd3c34ebca9a67e05c2e59649da81486210e802c..8e9a029e093d6522b68c0929aad93704f47c3ad5 100644 (file)
@@ -14,6 +14,8 @@
 #define MARVELL_PHY_ID_88E1149R                0x01410e50
 #define MARVELL_PHY_ID_88E1240         0x01410e30
 #define MARVELL_PHY_ID_88E1318S                0x01410e90
+#define MARVELL_PHY_ID_88E1116R                0x01410e40
+#define MARVELL_PHY_ID_88E1510         0x01410dd0
 
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE     0x00000001
index b8ba85544721fd911ce52c61e08a8ba88328ecfe..2913b86eb12a7a1068991b9342e7ed43c8eec1fe 100644 (file)
@@ -6,7 +6,8 @@
 
 #if BITS_PER_LONG == 64
 
-#define div64_long(x,y) div64_s64((x),(y))
+#define div64_long(x, y) div64_s64((x), (y))
+#define div64_ul(x, y)   div64_u64((x), (y))
 
 /**
  * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
@@ -47,7 +48,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
 
 #elif BITS_PER_LONG == 32
 
-#define div64_long(x,y) div_s64((x),(y))
+#define div64_long(x, y) div_s64((x), (y))
+#define div64_ul(x, y)   div_u64((x), (y))
 
 #ifndef div_u64_rem
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
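
A sketch of div64_ul() (foo_avg_ns() is hypothetical): it pairs with div64_long() for unsigned operands, expanding to div64_u64() on 64-bit and div_u64() on 32-bit builds, so callers need not open-code the width split.

	static u64 foo_avg_ns(u64 total_ns, unsigned long nr_samples)
	{
		if (!nr_samples)
			return 0;
		return div64_ul(total_ns, nr_samples);
	}
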
index fb1bf7d6a41018edf8358c76ffce8b74e2282cba..0390d5943ed6db1885ecf67612d7cd1da60e908b 100644 (file)
@@ -373,13 +373,11 @@ struct ab8500_sysctrl_platform_data;
 /**
  * struct ab8500_platform_data - AB8500 platform data
  * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
- * @pm_power_off: Should machine pm power off hook be registered or not
  * @init: board-specific initialization after detection of ab8500
  * @regulator: machine-specific constraints for regulators
  */
 struct ab8500_platform_data {
        int irq_base;
-       bool pm_power_off;
        void (*init) (struct ab8500 *);
        struct ab8500_regulator_platform_data *regulator;
        struct abx500_gpio_platform_data *gpio;
index adf6e0648f20cb209413a2137f0b688862a7e559..8074a9711cf1c1f3957088c8a73efe838eb6bd38 100644 (file)
@@ -237,7 +237,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
-
+int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
 
 #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
 
index 67f46ad6920a0bffeefb2efe0c437a7a2f86a883..352eec9df1b84097420a88953ce7276cbb637b69 100644 (file)
@@ -126,7 +126,7 @@ struct mlx4_rss_context {
 
 struct mlx4_qp_path {
        u8                      fl;
-       u8                      reserved1[1];
+       u8                      vlan_control;
        u8                      disable_pkey_check;
        u8                      pkey_index;
        u8                      counter_index;
@@ -141,11 +141,32 @@ struct mlx4_qp_path {
        u8                      sched_queue;
        u8                      vlan_index;
        u8                      feup;
-       u8                      reserved3;
+       u8                      fvl_rx;
        u8                      reserved4[2];
        u8                      dmac[6];
 };
 
+enum { /* fl */
+       MLX4_FL_CV      = 1 << 6,
+       MLX4_FL_ETH_HIDE_CQE_VLAN       = 1 << 2
+};
+enum { /* vlan_control */
+       MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED      = 1 << 6,
+       MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED      = 1 << 2,
+       MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
+       MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED    = 1 << 0
+};
+
+enum { /* feup */
+       MLX4_FEUP_FORCE_ETH_UP          = 1 << 6, /* force Eth UP */
+       MLX4_FSM_FORCE_ETH_SRC_MAC      = 1 << 5, /* force Source MAC */
+       MLX4_FVL_FORCE_ETH_VLAN         = 1 << 3  /* force Eth vlan */
+};
+
+enum { /* fvl_rx */
+       MLX4_FVL_RX_FORCE_ETH_VLAN      = 1 << 0 /* enforce Eth rx vlan */
+};
+
 struct mlx4_qp_context {
        __be32                  flags;
        __be32                  pd;
@@ -185,6 +206,10 @@ struct mlx4_qp_context {
        u32                     reserved5[10];
 };
 
+enum { /* param3 */
+       MLX4_STRIP_VLAN = 1 << 30
+};
+
 /* Which firmware version adds support for NEC (NoErrorCompletion) bit */
 #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
 
index 141d395bbb5f1a215483c90b2a10d5bd0bc58ff7..6e8215b159982378847cc7aacd75747da9c604d6 100644 (file)
@@ -30,6 +30,7 @@ struct mv643xx_eth_shared_platform_data {
 #define MV643XX_ETH_PHY_ADDR(x)                (0x80 | (x))
 #define MV643XX_ETH_PHY_NONE           0xff
 
+struct device_node;
 struct mv643xx_eth_platform_data {
        /*
         * Pointer back to our parent instance, and our port number.
@@ -41,6 +42,7 @@ struct mv643xx_eth_platform_data {
         * Whether a PHY is present, and if yes, at which address.
         */
        int                     phy_addr;
+       struct device_node      *phy_node;
 
        /*
         * Use this MAC address if it is valid, overriding the
index 99c9f0c103c23ccaab15e69fe6f684cd8e21c943..4f27575ce1d67ebe74d1bba4f40a70f6edbdea11 100644 (file)
@@ -79,9 +79,9 @@ enum sock_type {
 #endif /* ARCH_HAS_SOCKET_TYPES */
 
 enum sock_shutdown_cmd {
-       SHUT_RD         = 0,
-       SHUT_WR         = 1,
-       SHUT_RDWR       = 2,
+       SHUT_RD,
+       SHUT_WR,
+       SHUT_RDWR,
 };
 
 struct socket_wq {
index 09906b7ca47d605dec59a9e21232f68660974562..a2a89a5c7be55b15baec6271a8cc4329b6f642bd 100644 (file)
@@ -43,8 +43,9 @@ enum {
        NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
        NETIF_F_GSO_GRE_BIT,            /* ... GRE with TSO */
        NETIF_F_GSO_UDP_TUNNEL_BIT,     /* ... UDP TUNNEL with TSO */
+       NETIF_F_GSO_MPLS_BIT,           /* ... MPLS segmentation */
        /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
-               NETIF_F_GSO_UDP_TUNNEL_BIT,
+               NETIF_F_GSO_MPLS_BIT,
 
        NETIF_F_FCOE_CRC_BIT,           /* FCoE CRC32 */
        NETIF_F_SCTP_CSUM_BIT,          /* SCTP checksum offload */
@@ -107,6 +108,7 @@ enum {
 #define NETIF_F_RXALL          __NETIF_F(RXALL)
 #define NETIF_F_GSO_GRE                __NETIF_F(GSO_GRE)
 #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
+#define NETIF_F_GSO_MPLS       __NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX        __NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX        __NETIF_F(HW_VLAN_STAG_TX)
index a94a5a0ab122e8fc577df4db38eb65c1ec543f9c..09b4188c1ea79cc0fa3eed0f4a668d71b930ea55 100644 (file)
@@ -324,12 +324,15 @@ struct napi_struct {
        struct sk_buff          *gro_list;
        struct sk_buff          *skb;
        struct list_head        dev_list;
+       struct hlist_node       napi_hash_node;
+       unsigned int            napi_id;
 };
 
 enum {
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
        NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
+       NAPI_STATE_HASHED,      /* In NAPI hash */
 };
 
 enum gro_result {
@@ -445,6 +448,32 @@ static inline bool napi_reschedule(struct napi_struct *napi)
 extern void __napi_complete(struct napi_struct *n);
 extern void napi_complete(struct napi_struct *n);
 
+/**
+ *     napi_by_id - lookup a NAPI by napi_id
+ *     @napi_id: hashed napi_id
+ *
+ * lookup @napi_id in napi_hash table
+ * must be called under rcu_read_lock()
+ */
+extern struct napi_struct *napi_by_id(unsigned int napi_id);
+
+/**
+ *     napi_hash_add - add a NAPI to global hashtable
+ *     @napi: napi context
+ *
+ * generate a new napi_id and store a @napi under it in napi_hash
+ */
+extern void napi_hash_add(struct napi_struct *napi);
+
+/**
+ *     napi_hash_del - remove a NAPI from global table
+ *     @napi: napi context
+ *
+ * Warning: caller must observe rcu grace period
+ * before freeing memory containing @napi
+ */
+extern void napi_hash_del(struct napi_struct *napi);
+
 /**
  *     napi_disable - prevent NAPI from scheduling
  *     @n: napi context
@@ -800,6 +829,7 @@ struct netdev_fcoe_hbainfo {
  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  * int (*ndo_get_vf_config)(struct net_device *dev,
  *                         int vf, struct ifla_vf_info *ivf);
+ * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  *                       struct nlattr *port[]);
  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
@@ -942,6 +972,9 @@ struct net_device_ops {
                                                     struct netpoll_info *info,
                                                     gfp_t gfp);
        void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
+#endif
+#ifdef CONFIG_NET_LL_RX_POLL
+       int                     (*ndo_ll_poll)(struct napi_struct *dev);
 #endif
        int                     (*ndo_set_vf_mac)(struct net_device *dev,
                                                  int queue, u8 *mac);
@@ -954,6 +987,8 @@ struct net_device_ops {
        int                     (*ndo_get_vf_config)(struct net_device *dev,
                                                     int vf,
                                                     struct ifla_vf_info *ivf);
+       int                     (*ndo_set_vf_link_state)(struct net_device *dev,
+                                                        int vf, int link_state);
        int                     (*ndo_set_vf_port)(struct net_device *dev,
                                                   int vf,
                                                   struct nlattr *port[]);
@@ -1088,6 +1123,8 @@ struct net_device {
         * need to set them appropriately.
         */
        netdev_features_t       hw_enc_features;
+       /* mask of fetures inheritable by MPLS */
+       netdev_features_t       mpls_features;
 
        /* Interface index. Unique device identifier    */
        int                     ifindex;
@@ -1140,8 +1177,10 @@ struct net_device {
        unsigned char           addr_assign_type; /* hw address assignment type */
        unsigned char           addr_len;       /* hardware address length      */
        unsigned char           neigh_priv_len;
-       unsigned short          dev_id;         /* for shared network cards */
-
+       unsigned short          dev_id;         /* Used to differentiate devices
+                                                * that share the same link
+                                                * layer address
+                                                */
        spinlock_t              addr_list_lock;
        struct netdev_hw_addr_list      uc;     /* Unicast mac addresses */
        struct netdev_hw_addr_list      mc;     /* Multicast mac addresses */
@@ -1593,9 +1632,34 @@ struct packet_offload {
 #define NETDEV_RELEASE         0x0012
 #define NETDEV_NOTIFY_PEERS    0x0013
 #define NETDEV_JOIN            0x0014
+#define NETDEV_CHANGEUPPER     0x0015
 
 extern int register_netdevice_notifier(struct notifier_block *nb);
 extern int unregister_netdevice_notifier(struct notifier_block *nb);
+
+struct netdev_notifier_info {
+       struct net_device *dev;
+};
+
+struct netdev_notifier_change_info {
+       struct netdev_notifier_info info; /* must be first */
+       unsigned int flags_changed;
+};
+
+static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
+                                            struct net_device *dev)
+{
+       info->dev = dev;
+}
+
+static inline struct net_device *
+netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
+{
+       return info->dev;
+}
+
+extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+                                        struct netdev_notifier_info *info);
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 
@@ -1778,6 +1842,19 @@ static inline int unregister_gifconf(unsigned int family)
        return register_gifconf(family, NULL);
 }
 
+#ifdef CONFIG_NET_FLOW_LIMIT
+#define FLOW_LIMIT_HISTORY     (1 << 7)  /* must be ^2 and !overflow buckets */
+struct sd_flow_limit {
+       u64                     count;
+       unsigned int            num_buckets;
+       unsigned int            history_head;
+       u16                     history[FLOW_LIMIT_HISTORY];
+       u8                      buckets[];
+};
+
+extern int netdev_flow_limit_table_len;
+#endif /* CONFIG_NET_FLOW_LIMIT */
+
 /*
  * Incoming packets are placed on per-cpu queues
  */
@@ -1807,6 +1884,10 @@ struct softnet_data {
        unsigned int            dropped;
        struct sk_buff_head     input_pkt_queue;
        struct napi_struct      backlog;
+
+#ifdef CONFIG_NET_FLOW_LIMIT
+       struct sd_flow_limit __rcu *flow_limit;
+#endif
 };
 
 static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -2733,6 +2814,17 @@ static inline netdev_features_t netdev_get_wanted_features(
 }
 netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask);
+
+/* Allow TSO being used on stacked device :
+ * Performing the GSO segmentation before last device
+ * is a performance improvement.
+ */
+static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
+                                                       netdev_features_t mask)
+{
+       return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+}
+
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
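
A sketch of a notifier callback written against the new info-based calling convention (everything prefixed foo_ is hypothetical): once notifiers are invoked through call_netdevice_notifiers_info(), the callback recovers the device with netdev_notifier_info_to_dev() rather than casting the opaque pointer, and can also react to the new NETDEV_CHANGEUPPER event. The block would be registered as usual with register_netdevice_notifier().

	static int foo_netdev_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		switch (event) {
		case NETDEV_UP:
			foo_handle_up(dev);	/* hypothetical driver hook */
			break;
		case NETDEV_CHANGEUPPER:	/* new event defined above */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block foo_netdev_notifier = {
		.notifier_call = foo_netdev_event,
	};
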
index 0060fde3160e0d029d29f527b3b4330850ebb356..de70f7b45b682126284a4162535659bfde8e5be1 100644 (file)
@@ -35,7 +35,7 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
        result->all[3] = a1->all[3] & mask->all[3];
 }
 
-extern void netfilter_init(void);
+extern int netfilter_init(void);
 
 /* Largest hook number + 1 */
 #define NF_MAX_HOOKS 8
index 98ffb54988b6d79d16c7364212337e6312271393..2d4df6ce043efab2f9017bc5359b90846f085fb7 100644 (file)
@@ -17,6 +17,22 @@ extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 
 extern int ipv6_netfilter_init(void);
 extern void ipv6_netfilter_fini(void);
+
+/*
+ * Hook functions for ipv6 to allow xt_* modules to be built-in even
+ * if IPv6 is a module.
+ */
+struct nf_ipv6_ops {
+       int (*chk_addr)(struct net *net, const struct in6_addr *addr,
+                       const struct net_device *dev, int strict);
+};
+
+extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
+{
+       return rcu_dereference(nf_ipv6_ops);
+}
+
 #else /* CONFIG_NETFILTER */
 static inline int ipv6_netfilter_init(void) { return 0; }
 static inline void ipv6_netfilter_fini(void) { return; }
index 6358da5eeee8f8d89b619557631613762d09c883..86fde81ac2e6177bae7640983c4b2ea59b083065 100644 (file)
@@ -46,6 +46,7 @@ struct netlink_kernel_cfg {
        void            (*input)(struct sk_buff *skb);
        struct mutex    *cb_mutex;
        void            (*bind)(int group);
+       bool            (*compare)(struct net *net, struct sock *sk);
 };
 
 extern struct sock *__netlink_kernel_create(struct net *net, int unit,
@@ -144,4 +145,14 @@ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        return __netlink_dump_start(ssk, skb, nlh, control);
 }
 
+struct netlink_tap {
+       struct net_device *dev;
+       struct module *module;
+       struct list_head list;
+};
+
+extern int netlink_add_tap(struct netlink_tap *nt);
+extern int __netlink_remove_tap(struct netlink_tap *nt);
+extern int netlink_remove_tap(struct netlink_tap *nt);
+
 #endif /* __LINUX_NETLINK_H */
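
A registration sketch for the new netlink tap hooks (foo_nltap and its setup/teardown helpers are hypothetical): a module that owns a net_device can ask to receive copies of netlink traffic on it, in the style of a netlink monitoring device.

	static struct netlink_tap foo_nltap;

	static int foo_tap_setup(struct net_device *dev)
	{
		foo_nltap.dev = dev;		/* device that receives the tapped skbs */
		foo_nltap.module = THIS_MODULE;
		return netlink_add_tap(&foo_nltap);
	}

	static void foo_tap_teardown(void)
	{
		netlink_remove_tap(&foo_nltap);
	}
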
index fa2cb76a702933c1a6d03cc915e6f63c4a5dc0cf..f3c7c24bec1ca99c89270611d87f9c264c3eb786 100644 (file)
@@ -53,10 +53,10 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern int netpoll_rx_disable(struct net_device *dev);
+extern void netpoll_rx_disable(struct net_device *dev);
 extern void netpoll_rx_enable(struct net_device *dev);
 #else
-static inline int netpoll_rx_disable(struct net_device *dev) { return 0; }
+static inline void netpoll_rx_disable(struct net_device *dev) { return; }
 static inline void netpoll_rx_enable(struct net_device *dev) { return; }
 #endif
 
index 3863a4dbdf1888c51ff9d4ae94057a31e5118034..2a93b64a3869f2af9ea623b8ddc0a435668b3c8c 100644 (file)
  *
  */
 
-#ifdef CONFIG_OF_DEVICE
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+
+#ifdef CONFIG_OF_DEVICE
 #include <linux/pm.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -100,7 +101,7 @@ extern int of_platform_populate(struct device_node *root,
 
 #if !defined(CONFIG_OF_ADDRESS)
 struct of_dev_auxdata;
-struct device;
+struct device_node;
 static inline int of_platform_populate(struct device_node *root,
                                        const struct of_device_id *matches,
                                        const struct of_dev_auxdata *lookup,
index 81b31613eb252b3a46052d2375e333affc91c77f..1704479772787b28950c9768a9e73c35c88dcd9a 100644 (file)
@@ -60,11 +60,13 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
 void acpiphp_init(void);
 void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
 void acpiphp_remove_slots(struct pci_bus *bus);
+void acpiphp_check_host_bridge(acpi_handle handle);
 #else
 static inline void acpiphp_init(void) { }
 static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
                                           acpi_handle handle) { }
 static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
+static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
 #endif
 
 #else  /* CONFIG_ACPI */
index 9e11039dd7a3b89c40a19d10ebb3c382aed5dd7e..64ab823f7b7451f77aa4b58df22b4a3a54c30db1 100644 (file)
@@ -49,6 +49,7 @@
 
 #define PHY_HAS_INTERRUPT      0x00000001
 #define PHY_HAS_MAGICANEG      0x00000002
+#define PHY_IS_INTERNAL                0x00000004
 
 /* Interface Mode definitions */
 typedef enum {
@@ -57,6 +58,7 @@ typedef enum {
        PHY_INTERFACE_MODE_GMII,
        PHY_INTERFACE_MODE_SGMII,
        PHY_INTERFACE_MODE_TBI,
+       PHY_INTERFACE_MODE_REVMII,
        PHY_INTERFACE_MODE_RMII,
        PHY_INTERFACE_MODE_RGMII,
        PHY_INTERFACE_MODE_RGMII_ID,
@@ -261,6 +263,7 @@ struct phy_c45_device_ids {
  * phy_id: UID for this device found during discovery
  * c45_ids: 802.3-c45 Device Identifers if is_c45.
  * is_c45:  Set to true if this phy uses clause 45 addressing.
+ * is_internal: Set to true if this phy is internal to a MAC.
  * state: state of the PHY for management purposes
  * dev_flags: Device-specific flags used by the PHY driver.
  * addr: Bus address of PHY
@@ -298,6 +301,7 @@ struct phy_device {
 
        struct phy_c45_device_ids c45_ids;
        bool is_c45;
+       bool is_internal;
 
        enum phy_state state;
 
@@ -508,6 +512,27 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
        return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
 }
 
+/**
+ * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
+ * @phydev: the phy_device struct
+ *
+ * NOTE: must be kept in sync with addition/removal of PHY_POLL and
+ * PHY_IGNORE_INTERRUPT
+ */
+static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
+{
+       return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+}
+
+/**
+ * phy_is_internal - Convenience function for testing if a PHY is internal
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_internal(struct phy_device *phydev)
+{
+       return phydev->is_internal;
+}
+
 struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
                bool is_c45, struct phy_c45_device_ids *c45_ids);
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
@@ -545,6 +570,8 @@ void phy_drivers_unregister(struct phy_driver *drv, int n);
 int phy_driver_register(struct phy_driver *new_driver);
 int phy_drivers_register(struct phy_driver *new_driver, int n);
 void phy_state_machine(struct work_struct *work);
+void phy_change(struct work_struct *work);
+void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev,
                void (*handler)(struct net_device *));
 void phy_stop_machine(struct phy_device *phydev);
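
A sketch of the two new convenience tests (foo_describe_phy() is hypothetical): they let MAC drivers key behaviour off PHY properties without reaching into struct phy_device directly.

	static void foo_describe_phy(struct phy_device *phydev)
	{
		pr_info("%s PHY, %s link handling\n",
			phy_is_internal(phydev) ? "internal" : "external",
			phy_interrupt_is_valid(phydev) ? "interrupt-driven" : "polled");
	}
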
index 72474e18f1e0d8fef7441d3f11269eb90059b684..6aa238096622441de2125f3a7658fe2777f349a1 100644 (file)
  *     if it is 0, pull-down is disabled.
  * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
  *     low, this is the most typical case and is typically achieved with two
- *     active transistors on the output. Sending this config will enabale
+ *     active transistors on the output. Setting this config will enable
  *     push-pull mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
- *     which are then pulled up with an external resistor. Sending this
- *     config will enabale open drain mode, the argument is ignored.
+ *     which are then pulled up with an external resistor. Setting this
+ *     config will enable open drain mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
- *     (open emitter). Sending this config will enabale open drain mode, the
+ *     (open emitter). Setting this config will enable open drain mode, the
  *     argument is ignored.
- * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as
- *     argument. The argument is in mA.
+ * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
+ *     passed as argument. The argument is in mA.
  * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
  *      If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
  *      schmitt-trigger mode is disabled.
index 528e73ce46d2e2903b30172f61627ea113b8be02..23901992b9ddc2b4f67dc50ef0fe6e64f8173d2f 100644 (file)
 #ifndef __CLK_LPSS_H
 #define __CLK_LPSS_H
 
+struct lpss_clk_data {
+       const char *name;
+       struct clk *clk;
+};
+
 extern int lpt_clk_init(void);
 
 #endif /* __CLK_LPSS_H */
index ff9b0aab5281c2251e93f5a6f56ea09cf2e31289..c860c1b314c0473a7737cd6c7dacd65344fbc656 100644 (file)
@@ -43,8 +43,6 @@ struct omap_uart_port_info {
        int                     DTR_present;
 
        int (*get_context_loss_count)(struct device *);
-       void (*set_forceidle)(struct device *);
-       void (*set_noidle)(struct device *);
        void (*enable_wakeup)(struct device *, bool);
 };
 
index 6af944ab38f05638e7443122abbc1ec5592dfcbe..22c7052e937248e4c3337778d8608b3508a24ce5 100644 (file)
@@ -4,6 +4,7 @@
 #include <stdarg.h>
 #include <linux/init.h>
 #include <linux/kern_levels.h>
+#include <linux/linkage.h>
 
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
index 8089e35d47aca865b660d7aca94ad01a59111652..f4b1001a4676b48bfb04e74d3051813c90875752 100644 (file)
@@ -460,6 +460,26 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
                pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))
 
+/**
+ * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
+ * @pos:       the type * to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hlist_for_each_entry_rcu_notrace(pos, head, member)                    \
+       for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+                       typeof(*(pos)), member);                        \
+               pos;                                                    \
+               pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+                       &(pos)->member)), typeof(*(pos)), member))
+
 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
  * @pos:       the type * to use as a loop cursor.
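
A lookup sketch using the _notrace iterator (struct foo and foo_lookup_notrace() are hypothetical): it is meant for paths the function tracer itself may execute, where the usual RCU debug checks could recurse; the caller still provides RCU protection (rcu_read_lock() or an equivalent context).

	struct foo {
		struct hlist_node	hnode;
		unsigned long		key;
	};

	static struct foo *foo_lookup_notrace(struct hlist_head *bucket,
					      unsigned long key)
	{
		struct foo *f;

		hlist_for_each_entry_rcu_notrace(f, bucket, hnode)
			if (f->key == key)
				return f;
		return NULL;
	}
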
index 2ae13714828bc42568e77684fedfc2cf929c291d..1c33dd7da4a7d860004264e00bd406645df1ed96 100644 (file)
@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  * @head:      the head for your list.
  * @member:    the name of the hlist_nulls_node within the struct.
  *
+ * The barrier() is needed to make sure compiler doesn't cache first element [1],
+ * as this loop can be restarted [2]
+ * [1] Documentation/atomic_ops.txt around line 114
+ * [2] Documentation/RCU/rculist_nulls.txt around line 146
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)                        \
-       for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));            \
+       for (({barrier();}),                                                    \
+            pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));            \
                (!is_a_nulls(pos)) &&                                           \
                ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
index 4ccd68e49b00dbb9ef88006d31f9cfbadec3272a..ddcc7826d9075dba200be0cd96aaef77afb9a74e 100644 (file)
@@ -640,6 +640,15 @@ static inline void rcu_preempt_sleep_check(void)
 
 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
 
+/*
+ * The tracing infrastructure traces RCU (we want that), but unfortunately
+ * some of the RCU checks causes tracing to lock up the system.
+ *
+ * The tracing version of rcu_dereference_raw() must not call
+ * rcu_read_lock_held().
+ */
+#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+
 /**
  * rcu_access_index() - fetch RCU index with no dereferencing
  * @p: The index to read
index a3e7842786679567185ecdbc8835fd5936fb36f4..18e099342e6f82732a93b347b021ca7aa4e2b22d 100644 (file)
@@ -83,7 +83,6 @@
 
 extern struct bus_type rio_bus_type;
 extern struct device rio_bus;
-extern struct list_head rio_devices;   /* list of all devices */
 
 struct rio_mport;
 struct rio_dev;
@@ -237,6 +236,7 @@ enum rio_phy_type {
  * @name: Port name string
  * @priv: Master port private data
  * @dma: DMA device associated with mport
+ * @nscan: RapidIO network enumeration/discovery operations
  */
 struct rio_mport {
        struct list_head dbells;        /* list of doorbell events */
@@ -262,8 +262,14 @@ struct rio_mport {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_device       dma;
 #endif
+       struct rio_scan *nscan;
 };
 
+/*
+ * Enumeration/discovery control flags
+ */
+#define RIO_SCAN_ENUM_NO_WAIT  0x00000001 /* Do not wait for enum completed */
+
 struct rio_id_table {
        u16 start;      /* logical minimal id */
        u32 max;        /* max number of IDs in table */
@@ -460,6 +466,16 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
 }
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 
+/**
+ * struct rio_scan - RIO enumeration and discovery operations
+ * @enumerate: Callback to perform RapidIO fabric enumeration.
+ * @discover: Callback to perform RapidIO fabric discovery.
+ */
+struct rio_scan {
+       int (*enumerate)(struct rio_mport *mport, u32 flags);
+       int (*discover)(struct rio_mport *mport, u32 flags);
+};
+
 /* Architecture and hardware-specific functions */
 extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
index b75c05920ab58f976cb73223ce7a7da97a4b146d..5059994fe2970edf8942e537fd3fe81144c2ccb3 100644 (file)
@@ -433,5 +433,6 @@ extern u16 rio_local_get_device_id(struct rio_mport *port);
 extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
 extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
                                   struct rio_dev *from);
+extern int rio_init_mports(void);
 
 #endif                         /* LINUX_RIO_DRV_H */
index 5951e3f38878398d36eef96e0482ebae15bf8409..26806775b11b3932711bc796d1284171d92f58fb 100644 (file)
@@ -111,6 +111,9 @@ static inline struct page *sg_page(struct scatterlist *sg)
 static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
                              unsigned int buflen)
 {
+#ifdef CONFIG_DEBUG_SG
+       BUG_ON(!virt_addr_valid(buf));
+#endif
        sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
 }
 
index 178a8d909f14a3dcdcbc0ce255572975c8b3b221..4ff8da189253a96fa9076149f188f7c5d947402e 100644 (file)
@@ -2444,6 +2444,15 @@ extern int __cond_resched_softirq(void);
        __cond_resched_softirq();                                       \
 })
 
+static inline void cond_resched_rcu(void)
+{
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+       rcu_read_unlock();
+       cond_resched();
+       rcu_read_lock();
+#endif
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
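
A usage sketch for cond_resched_rcu() (foo_table and foo_inspect_slot() are hypothetical): on configurations where it does anything, it drops and re-takes the RCU read lock around cond_resched(), so it only belongs at points where the walk tolerates the read-side critical section being broken, such as an index-based scan where each iteration stands on its own.

	static void foo_scan(struct foo_table *tbl)
	{
		int i;

		rcu_read_lock();
		for (i = 0; i < tbl->size; i++) {
			foo_inspect_slot(rcu_dereference(tbl->slots[i]));
			cond_resched_rcu();	/* may unlock, reschedule, and relock */
		}
		rcu_read_unlock();
	}
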
index 2e0ced1af3b10458f4507560bd9c703c2661425f..6b06023e8a08d7f81303d182cf8c06fc76ac5940 100644 (file)
@@ -319,6 +319,8 @@ enum {
        SKB_GSO_GRE = 1 << 6,
 
        SKB_GSO_UDP_TUNNEL = 1 << 7,
+
+       SKB_GSO_MPLS = 1 << 8,
 };
 
 #if BITS_PER_LONG > 32
@@ -384,11 +386,13 @@ typedef unsigned char *sk_buff_data_t;
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
  *     @dma_cookie: a cookie to one of several possible DMA operations
  *             done by skb DMA functions
+  *    @napi_id: id of the NAPI struct this skb came from
  *     @secmark: security marking
  *     @mark: Generic packet mark
  *     @dropcount: total number of sk_receive_queue overflows
  *     @vlan_proto: vlan encapsulation protocol
  *     @vlan_tci: vlan tag control information
+ *     @inner_protocol: Protocol (encapsulation)
  *     @inner_transport_header: Inner transport layer header (encapsulation)
  *     @inner_network_header: Network layer header (encapsulation)
  *     @inner_mac_header: Link layer header (encapsulation)
@@ -497,8 +501,11 @@ struct sk_buff {
        /* 7/9 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
-#ifdef CONFIG_NET_DMA
-       dma_cookie_t            dma_cookie;
+#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL
+       union {
+               unsigned int    napi_id;
+               dma_cookie_t    dma_cookie;
+       };
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
        __u32                   secmark;
@@ -509,12 +516,13 @@ struct sk_buff {
                __u32           reserved_tailroom;
        };
 
-       sk_buff_data_t          inner_transport_header;
-       sk_buff_data_t          inner_network_header;
-       sk_buff_data_t          inner_mac_header;
-       sk_buff_data_t          transport_header;
-       sk_buff_data_t          network_header;
-       sk_buff_data_t          mac_header;
+       __be16                  inner_protocol;
+       __u16                   inner_transport_header;
+       __u16                   inner_network_header;
+       __u16                   inner_mac_header;
+       __u16                   transport_header;
+       __u16                   network_header;
+       __u16                   mac_header;
        /* These elements must be at the end, see alloc_skb() for details.  */
        sk_buff_data_t          tail;
        sk_buff_data_t          end;
@@ -1387,6 +1395,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
        skb_reset_tail_pointer(skb);
        skb->tail += offset;
 }
+
 #else /* NET_SKBUFF_DATA_USES_OFFSET */
 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
 {
@@ -1527,7 +1536,6 @@ static inline void skb_reset_mac_len(struct sk_buff *skb)
        skb->mac_len = skb->network_header - skb->mac_header;
 }
 
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
                                                        *skb)
 {
@@ -1581,7 +1589,7 @@ static inline void skb_set_inner_mac_header(struct sk_buff *skb,
 }
 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
 {
-       return skb->transport_header != ~0U;
+       return skb->transport_header != (typeof(skb->transport_header))~0U;
 }
 
 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
@@ -1624,7 +1632,7 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
 
 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
 {
-       return skb->mac_header != ~0U;
+       return skb->mac_header != (typeof(skb->mac_header))~0U;
 }
 
 static inline void skb_reset_mac_header(struct sk_buff *skb)
@@ -1638,112 +1646,6 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
        skb->mac_header += offset;
 }
 
-#else /* NET_SKBUFF_DATA_USES_OFFSET */
-static inline unsigned char *skb_inner_transport_header(const struct sk_buff
-                                                       *skb)
-{
-       return skb->inner_transport_header;
-}
-
-static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
-{
-       skb->inner_transport_header = skb->data;
-}
-
-static inline void skb_set_inner_transport_header(struct sk_buff *skb,
-                                                  const int offset)
-{
-       skb->inner_transport_header = skb->data + offset;
-}
-
-static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
-{
-       return skb->inner_network_header;
-}
-
-static inline void skb_reset_inner_network_header(struct sk_buff *skb)
-{
-       skb->inner_network_header = skb->data;
-}
-
-static inline void skb_set_inner_network_header(struct sk_buff *skb,
-                                               const int offset)
-{
-       skb->inner_network_header = skb->data + offset;
-}
-
-static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
-{
-       return skb->inner_mac_header;
-}
-
-static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
-{
-       skb->inner_mac_header = skb->data;
-}
-
-static inline void skb_set_inner_mac_header(struct sk_buff *skb,
-                                               const int offset)
-{
-       skb->inner_mac_header = skb->data + offset;
-}
-static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
-{
-       return skb->transport_header != NULL;
-}
-
-static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
-{
-       return skb->transport_header;
-}
-
-static inline void skb_reset_transport_header(struct sk_buff *skb)
-{
-       skb->transport_header = skb->data;
-}
-
-static inline void skb_set_transport_header(struct sk_buff *skb,
-                                           const int offset)
-{
-       skb->transport_header = skb->data + offset;
-}
-
-static inline unsigned char *skb_network_header(const struct sk_buff *skb)
-{
-       return skb->network_header;
-}
-
-static inline void skb_reset_network_header(struct sk_buff *skb)
-{
-       skb->network_header = skb->data;
-}
-
-static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
-{
-       skb->network_header = skb->data + offset;
-}
-
-static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
-{
-       return skb->mac_header;
-}
-
-static inline int skb_mac_header_was_set(const struct sk_buff *skb)
-{
-       return skb->mac_header != NULL;
-}
-
-static inline void skb_reset_mac_header(struct sk_buff *skb)
-{
-       skb->mac_header = skb->data;
-}
-
-static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
-{
-       skb->mac_header = skb->data + offset;
-}
-#endif /* NET_SKBUFF_DATA_USES_OFFSET */
-
 static inline void skb_probe_transport_header(struct sk_buff *skb,
                                              const int offset_hint)
 {
@@ -2482,6 +2384,7 @@ extern void              skb_split(struct sk_buff *skb,
                                 struct sk_buff *skb1, const u32 len);
 extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                                 int shiftlen);
+extern void           skb_scrub_packet(struct sk_buff *skb);
 
 extern struct sk_buff *skb_segment(struct sk_buff *skb,
                                   netdev_features_t features);
@@ -2852,6 +2755,21 @@ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
                SKB_GSO_CB(inner_skb)->mac_offset;
 }
 
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+       int new_headroom, headroom;
+       int ret;
+
+       headroom = skb_headroom(skb);
+       ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+       if (ret)
+               return ret;
+
+       new_headroom = skb_headroom(skb);
+       SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+       return 0;
+}
+
 static inline bool skb_is_gso(const struct sk_buff *skb)
 {
        return skb_shinfo(skb)->gso_size;
index e6564c1dc552c6ca72b9b3b276fdb455a10bc716..c8488763277f0066bb731f019964f038f4107b57 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/irqflags.h>
 
 extern void cpu_idle(void);
 
@@ -139,13 +140,17 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 }
 #define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))
-#define on_each_cpu(func,info,wait)            \
-       ({                                      \
-               local_irq_disable();            \
-               func(info);                     \
-               local_irq_enable();             \
-               0;                              \
-       })
+
+static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       func(info);
+       local_irq_restore(flags);
+       return 0;
+}
+
 /*
  * Note we still need to test the mask even for UP
  * because we actually can get an empty mask from
index 428c37a1f95ce2eea8d9785ef69fc6050b5b7c96..b10ce4b341ea79f8a6e14e63dd458308287b9ac4 100644 (file)
@@ -305,7 +305,6 @@ struct ucred {
 
 extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
 
-extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
                               int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
@@ -314,7 +313,6 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
                                          unsigned int len, __wsum *csump);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
                             int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
@@ -322,6 +320,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
 struct timespec;
 
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
+extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
 extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
                          unsigned int flags, struct timespec *timeout);
 extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
index 733eb5ee31c5446cbe6a11bb0b3603a6534edf49..6ff26c8db7b923853527cee3ee1ffff1ca55bdd6 100644 (file)
@@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type;
  * @modalias: Name of the driver to use with this device, or an alias
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT
  *     when not using a GPIO line)
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *     queue so the subsystem notifies the driver that it may relax the
  *     hardware by issuing this call
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
- *     number. Any individual value may be -EINVAL for CS lines that
+ *     number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself).
  *
  * Each SPI master controller can communicate with one or more @spi_device
index 47ead515c81197fc897bc7ef37fead3d9ab4b39f..c5fd30d2a415a48964eb1fb2aaaac0db255656e1 100644 (file)
@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
+extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)
@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
+static inline void migration_entry_wait_huge(struct mm_struct *mm,
+                                       pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
        return 0;
index 38911391a139a37376e2053008d6e7bb49559cd5..98a3153c0f964e9e5a2d9402d15e9fa8788e2d57 100644 (file)
@@ -44,8 +44,8 @@
 /* Return size of the log buffer */
 #define SYSLOG_ACTION_SIZE_BUFFER   10
 
-#define SYSLOG_FROM_CALL 0
-#define SYSLOG_FROM_FILE 1
+#define SYSLOG_FROM_READER           0
+#define SYSLOG_FROM_PROC             1
 
 int do_syslog(int type, char __user *buf, int count, bool from_file);
 
index 5adbc33d1ab38c506c541e244024560c02686057..472120b4fac57584f30998d1be87c672607eb643 100644 (file)
@@ -246,7 +246,6 @@ struct tcp_sock {
 
        /* from STCP, retrans queue hinting */
        struct sk_buff* lost_skb_hint;
-       struct sk_buff *scoreboard_skb_hint;
        struct sk_buff *retransmit_skb_hint;
 
        struct sk_buff_head     out_of_order_queue; /* Out of order segments go here */
index 22d81b3c955b533bf73ec0b9353c6c7d4287c010..d5d229b2e5af1815d38d7d87d217ee8340b9ec1d 100644 (file)
@@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
 
 extern bool persistent_clock_exist;
 
-#ifdef ALWAYS_USE_PERSISTENT_CLOCK
-#define has_persistent_clock() true
-#else
 static inline bool has_persistent_clock(void)
 {
        return persistent_clock_exist;
 }
-#endif
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
index 2f322c38bd4d61d01e63bb89d2fa134e9b809ae6..f8e084d0fc772a59bd786871bb6b166e2303a252 100644 (file)
@@ -145,8 +145,8 @@ static inline void tracepoint_synchronize_unregister(void)
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args),                     \
                                TP_CONDITION(cond),                     \
-                               rcu_idle_exit(),                        \
-                               rcu_idle_enter());                      \
+                               rcu_irq_enter(),                        \
+                               rcu_irq_exit());                        \
        }
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
index 629aaf51f30b8c1fdc6e27cd086c9caf19378896..c55ce243cc0985c450786e4cb63f8ed3c8e5c53b 100644 (file)
@@ -35,4 +35,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
 }
 
 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
 #endif
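
memcpy_fromiovec() and memcpy_toiovec() are now declared here rather than in
linux/socket.h, so callers outside the socket core only need linux/uio.h. A minimal
sketch of the kernel-to-user direction (the wrapper is illustrative):

#include <linux/uio.h>
#include <linux/socket.h>

static int demo_copy_reply(struct msghdr *msg, void *reply, int len)
{
        /*
         * Copies len bytes of kernel data into the user iovec, advancing the
         * iovec as it goes; returns 0 on success or -EFAULT.
         */
        return memcpy_toiovec(msg->msg_iov, reply, len);
}
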
index c454a88abf2e6fed7adda903a7d2e0626613e29f..f1b0dca60f127116113361da7ff61d726d0fa26a 100644 (file)
@@ -563,9 +563,8 @@ static inline int gadget_is_dualspeed(struct usb_gadget *g)
 }
 
 /**
- * gadget_is_superspeed() - return true if the hardware handles
- * supperspeed
- * @g: controller that might support supper speed
+ * gadget_is_superspeed() - return true if the hardware handles superspeed
+ * @g: controller that might support superspeed
  */
 static inline int gadget_is_superspeed(struct usb_gadget *g)
 {
index b9b0f7b4e43b81c4829802b5f0eea0cd0713974d..302ddf55d2daca5d74291b1eb9aaa2b4127fe7d2 100644 (file)
@@ -268,6 +268,8 @@ struct usb_serial_driver {
                        struct usb_serial_port *port, struct ktermios *old);
        void (*break_ctl)(struct tty_struct *tty, int break_state);
        int  (*chars_in_buffer)(struct tty_struct *tty);
+       void (*wait_until_sent)(struct tty_struct *tty, long timeout);
+       bool (*tx_empty)(struct usb_serial_port *port);
        void (*throttle)(struct tty_struct *tty);
        void (*unthrottle)(struct tty_struct *tty);
        int  (*tiocmget)(struct tty_struct *tty);
@@ -327,6 +329,8 @@ extern void usb_serial_generic_close(struct usb_serial_port *port);
 extern int usb_serial_generic_resume(struct usb_serial *serial);
 extern int usb_serial_generic_write_room(struct tty_struct *tty);
 extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
+                                                               long timeout);
 extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_throttle(struct tty_struct *tty);
index e8d65718560b3460b09123cb99d3a3f08685c217..0d33fca487748916faca92cad918c3726f713a57 100644 (file)
@@ -36,7 +36,7 @@ extern int fg_console, last_console, want_console;
 int vc_allocate(unsigned int console);
 int vc_cons_allocated(unsigned int console);
 int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
-void vc_deallocate(unsigned int console);
+struct vc_data *vc_deallocate(unsigned int console);
 void reset_palette(struct vc_data *vc);
 void do_blank_screen(int entering_gfx);
 void do_unblank_screen(int leaving_gfx);
index ac38be2692d89f2993381e57d6145ab75891b1d3..1133695eb0671d7aeb7f9140bf2b6adc3bc7483d 100644 (file)
@@ -217,6 +217,8 @@ do {                                                                        \
                if (!ret)                                               \
                        break;                                          \
        }                                                               \
+       if (!ret && (condition))                                        \
+               ret = 1;                                                \
        finish_wait(&wq, &__wait);                                      \
 } while (0)
 
@@ -233,8 +235,9 @@ do {                                                                        \
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
+ * The function returns 0 if the @timeout elapsed, or the remaining
+ * jiffies (at least 1) if the @condition evaluated to %true before
+ * the @timeout elapsed.
  */
 #define wait_event_timeout(wq, condition, timeout)                     \
 ({                                                                     \
@@ -302,6 +305,8 @@ do {                                                                        \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
+       if (!ret && (condition))                                        \
+               ret = 1;                                                \
        finish_wait(&wq, &__wait);                                      \
 } while (0)
 
@@ -318,9 +323,10 @@ do {                                                                       \
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  *
- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
- * was interrupted by a signal, and the remaining jiffies otherwise
- * if the condition evaluated to true before the timeout elapsed.
+ * Returns:
+ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
+ * a signal, or the remaining jiffies (at least 1) if the @condition
+ * evaluated to %true before the @timeout elapsed.
  */
 #define wait_event_interruptible_timeout(wq, condition, timeout)       \
 ({                                                                     \
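
Both timeout variants now return at least 1 when the condition is true, even if it
only became true as the timeout expired, so a zero return always means the condition
is still false. A minimal sketch of checking the result (the wait queue and flag are
illustrative; some other context is assumed to set demo_done and call wake_up()):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

static int demo_wait(void)
{
        long ret = wait_event_interruptible_timeout(demo_wq, demo_done,
                                                    msecs_to_jiffies(100));
        if (ret == 0)
                return -ETIMEDOUT;      /* condition still false */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS: interrupted */
        return 0;                       /* condition true, ret >= 1 */
}
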
index 06ef7e926a66483633763a6079bd3ab5d8c40170..b8ffac7b6bab755f5adb4d648b5d34bd57eb028f 100644 (file)
@@ -18,7 +18,7 @@ struct tcf_common {
        struct tcf_t                    tcfc_tm;
        struct gnet_stats_basic_packed  tcfc_bstats;
        struct gnet_stats_queue         tcfc_qstats;
-       struct gnet_stats_rate_est      tcfc_rate_est;
+       struct gnet_stats_rate_est64    tcfc_rate_est;
        spinlock_t                      tcfc_lock;
        struct rcu_head                 tcfc_rcu;
 };
index 84a6440f1f19ee698dba94a7f899d77fc527e432..21f702704f2444272e1554c87112594d40acd421 100644 (file)
@@ -65,7 +65,7 @@ extern int                    addrconf_set_dstaddr(struct net *net,
 
 extern int                     ipv6_chk_addr(struct net *net,
                                              const struct in6_addr *addr,
-                                             struct net_device *dev,
+                                             const struct net_device *dev,
                                              int strict);
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
index a79b6cfb02a8dcc252afa516e13f76f91af6eb67..cf8439ba4d11062a326919fa885c3ae5a24e62c5 100644 (file)
@@ -30,7 +30,7 @@ extern int gnet_stats_copy_basic(struct gnet_dump *d,
                                 struct gnet_stats_basic_packed *b);
 extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
                                    const struct gnet_stats_basic_packed *b,
-                                   struct gnet_stats_rate_est *r);
+                                   struct gnet_stats_rate_est64 *r);
 extern int gnet_stats_copy_queue(struct gnet_dump *d,
                                 struct gnet_stats_queue *q);
 extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
@@ -38,13 +38,13 @@ extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 extern int gnet_stats_finish_copy(struct gnet_dump *d);
 
 extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                            struct gnet_stats_rate_est *rate_est,
+                            struct gnet_stats_rate_est64 *rate_est,
                             spinlock_t *stats_lock, struct nlattr *opt);
 extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
-                              struct gnet_stats_rate_est *rate_est);
+                              struct gnet_stats_rate_est64 *rate_est);
 extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                                struct gnet_stats_rate_est *rate_est,
+                                struct gnet_stats_rate_est64 *rate_est,
                                 spinlock_t *stats_lock, struct nlattr *opt);
 extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
-                                const struct gnet_stats_rate_est *rate_est);
+                                const struct gnet_stats_rate_est64 *rate_est);
 #endif
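
The estimator API now takes struct gnet_stats_rate_est64 throughout; users only have
to switch the field type, the calling pattern itself is unchanged. A minimal sketch,
with the containing structure, lock and netlink attribute assumed to come from the
caller:

#include <net/gen_stats.h>
#include <net/netlink.h>
#include <linux/spinlock.h>

struct demo_entry {
        struct gnet_stats_basic_packed  bstats;
        struct gnet_stats_rate_est64    rate_est;
        spinlock_t                      lock;
};

static int demo_start_estimator(struct demo_entry *e, struct nlattr *opt)
{
        /* starts a rate estimator that fills e->rate_est from e->bstats */
        return gen_new_estimator(&e->bstats, &e->rate_est, &e->lock, opt);
}
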
index 9f03a390c826a44a700f42715c48fe6f11eef9ca..a5a4ddf053001bbdc5a33a12f8253174220c563d 100644 (file)
@@ -7,6 +7,7 @@
 #define GREPROTO_CISCO         0
 #define GREPROTO_PPTP          1
 #define GREPROTO_MAX           2
+#define GRE_IP_PROTO_MAX       2
 
 struct gre_protocol {
        int  (*handler)(struct sk_buff *skb);
@@ -22,6 +23,32 @@ struct gre_base_hdr {
 int gre_add_protocol(const struct gre_protocol *proto, u8 version);
 int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 
+struct gre_cisco_protocol {
+       int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
+       int (*err_handler)(struct sk_buff *skb, u32 info,
+                          const struct tnl_ptk_info *tpi);
+       u8 priority;
+};
+
+int gre_cisco_register(struct gre_cisco_protocol *proto);
+int gre_cisco_unregister(struct gre_cisco_protocol *proto);
+void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+                     int hdr_len);
+struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+static inline int ip_gre_calc_hlen(__be16 o_flags)
+{
+       int addend = 4;
+
+       if (o_flags&TUNNEL_CSUM)
+               addend += 4;
+       if (o_flags&TUNNEL_KEY)
+               addend += 4;
+       if (o_flags&TUNNEL_SEQ)
+               addend += 4;
+       return addend;
+}
+
 static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
 {
        __be16 tflags = 0;
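
ip_gre_calc_hlen() starts from the 4-byte base GRE header and adds 4 bytes for each
optional field selected in the tunnel flags. A worked example (the flag combination
is illustrative):

#include <net/ip_tunnels.h>
#include <net/gre.h>

static int demo_gre_header_len(void)
{
        /* 4 (base) + 4 (checksum word) + 4 (key) = 12 bytes */
        __be16 o_flags = TUNNEL_CSUM | TUNNEL_KEY;

        return ip_gre_calc_hlen(o_flags);
}
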
index 100fb8cec17c0157c331fc12366d22f593368295..e4c5a2d2ba34fdd18b3faf01af1b9669cc2e82b6 100644 (file)
@@ -50,7 +50,7 @@ struct inet6_ifaddr {
 
        int                     state;
 
-       __u8                    probes;
+       __u8                    dad_probes;
        __u8                    flags;
 
        __u16                   scope;
@@ -58,7 +58,7 @@ struct inet6_ifaddr {
        unsigned long           cstamp; /* created timestamp */
        unsigned long           tstamp; /* updated timestamp */
 
-       struct timer_list       timer;
+       struct timer_list       dad_timer;
 
        struct inet6_dev        *idev;
        struct rt6_info         *rt;
@@ -74,6 +74,7 @@ struct inet6_ifaddr {
        bool                    tokenized;
 
        struct rcu_head         rcu;
+       struct in6_addr         peer_addr;
 };
 
 struct ip6_sf_socklist {
@@ -192,9 +193,12 @@ struct inet6_dev {
        struct in6_addr         token;
 
        struct neigh_parms      *nd_parms;
-       struct inet6_dev        *next;
        struct ipv6_devconf     cnf;
        struct ipv6_devstat     stats;
+
+       struct timer_list       rs_timer;
+       __u8                    rs_probes;
+
        unsigned long           tstamp; /* ipv6InterfaceTable update timestamp */
        struct rcu_head         rcu;
 };
index aab73757bc4da4c4b5fce8390ec1234a86ea55c0..3bd22795c3e259e1f1f55176c808c6fdcc994600 100644 (file)
@@ -134,12 +134,14 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 {
        switch (skb->protocol) {
        case cpu_to_be16(ETH_P_IP):
-               if (skb->network_header + sizeof(struct iphdr) <= skb->tail)
+               if (skb_network_header(skb) + sizeof(struct iphdr) <=
+                   skb_tail_pointer(skb))
                        return IP_ECN_set_ce(ip_hdr(skb));
                break;
 
        case cpu_to_be16(ETH_P_IPV6):
-               if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail)
+               if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+                   skb_tail_pointer(skb))
                        return IP6_ECN_set_ce(ipv6_hdr(skb));
                break;
        }
index 7235ae73a1e8d07a905d6803017d341d951e8f56..b21a7f06d6a4955910b577a23b04eb904207265a 100644 (file)
@@ -32,7 +32,6 @@
  *
  * @faddr - Saved first hop address
  * @nexthop - Saved nexthop address in LSRR and SSRR
- * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
 * @srr_is_hit - Packet destination addr was one of ours
 * @is_changed - IP checksum no longer valid
index e49db91593a953422b0ce0a15c0eb0902ee33c56..44424e9dab2a7c74cdd58930bd3efa0ddb6be952 100644 (file)
@@ -51,6 +51,7 @@ struct rtable;
 
 struct fib_nh_exception {
        struct fib_nh_exception __rcu   *fnhe_next;
+       int                             fnhe_genid;
        __be32                          fnhe_daddr;
        u32                             fnhe_pmtu;
        __be32                          fnhe_gw;
index 4b6f0b28f41f097153669aa6194deae952ea58ff..781b3cf86a2f534ff71e03037b7984a0e84a8c8e 100644 (file)
@@ -42,6 +42,7 @@ struct ip_tunnel {
        struct ip_tunnel __rcu  *next;
        struct hlist_node hash_node;
        struct net_device       *dev;
+       struct net              *net;   /* netns for packet i/o */
 
        int             err_count;      /* Number of arrived ICMP errors */
        unsigned long   err_time;       /* Time when the last ICMP error
@@ -73,6 +74,7 @@ struct ip_tunnel {
 #define TUNNEL_REC     __cpu_to_be16(0x20)
 #define TUNNEL_VERSION __cpu_to_be16(0x40)
 #define TUNNEL_NO_KEY  __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
 
 struct tnl_ptk_info {
        __be16 flags;
@@ -92,16 +94,18 @@ struct ip_tunnel_net {
        struct net_device *fb_tunnel_dev;
 };
 
+#ifdef CONFIG_INET
+
 int ip_tunnel_init(struct net_device *dev);
 void ip_tunnel_uninit(struct net_device *dev);
 void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
-                                 struct rtnl_link_ops *ops, char *devname);
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+                      struct rtnl_link_ops *ops, char *devname);
 
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
 
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
-                   const struct iphdr *tnl_params);
+                   const struct iphdr *tnl_params, const u8 protocol);
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
@@ -155,23 +159,31 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
                                  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 }
 
-static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       int err;
-       int pkt_len = skb->len - skb_transport_offset(skb);
-       struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
+int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
+int iptunnel_xmit(struct net *net, struct rtable *rt,
+                 struct sk_buff *skb,
+                 __be32 src, __be32 dst, __u8 proto,
+                 __u8 tos, __u8 ttl, __be16 df);
 
-       nf_reset(skb);
+static inline void iptunnel_xmit_stats(int err,
+                                      struct net_device_stats *err_stats,
+                                      struct pcpu_tstats __percpu *stats)
+{
+       if (err > 0) {
+               struct pcpu_tstats *tstats = this_cpu_ptr(stats);
 
-       err = ip_local_out(skb);
-       if (likely(net_xmit_eval(err) == 0)) {
                u64_stats_update_begin(&tstats->syncp);
-               tstats->tx_bytes += pkt_len;
+               tstats->tx_bytes += err;
                tstats->tx_packets++;
                u64_stats_update_end(&tstats->syncp);
+       } else if (err < 0) {
+               err_stats->tx_errors++;
+               err_stats->tx_aborted_errors++;
        } else {
-               dev->stats.tx_errors++;
-               dev->stats.tx_aborted_errors++;
+               err_stats->tx_dropped++;
        }
 }
+
+#endif /* CONFIG_INET */
+
 #endif /* __NET_IP_TUNNELS_H */
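
iptunnel_xmit() now builds and sends the outer IPv4 header itself and returns the
number of bytes sent (or a negative error), while the per-cpu statistics update moves
into iptunnel_xmit_stats(). A minimal sketch of the calling pattern, assuming the
device uses the common per-cpu tstats convention and that the route and addresses
were resolved by the caller:

#include <net/ip_tunnels.h>
#include <net/route.h>
#include <linux/netdevice.h>
#include <linux/in.h>

static void demo_tunnel_send(struct net *net, struct rtable *rt,
                             struct sk_buff *skb, struct net_device *dev,
                             __be32 src, __be32 dst, u8 tos, u8 ttl, __be16 df)
{
        int err = iptunnel_xmit(net, rt, skb, src, dst, IPPROTO_GRE,
                                tos, ttl, df);

        /* err > 0: bytes sent, err == 0: dropped, err < 0: error */
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
}
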
index 4c062ccff9aa4cfa65ba9ecbbc22435c393d6620..4405886980c71dca160e19bf88cfde8f5419b216 100644 (file)
@@ -905,7 +905,7 @@ struct ip_vs_app {
 struct ipvs_master_sync_state {
        struct list_head        sync_queue;
        struct ip_vs_sync_buff  *sync_buff;
-       int                     sync_queue_len;
+       unsigned long           sync_queue_len;
        unsigned int            sync_queue_delay;
        struct task_struct      *master_thread;
        struct delayed_work     master_wakeup_work;
@@ -998,7 +998,7 @@ struct netns_ipvs {
        int                     sysctl_snat_reroute;
        int                     sysctl_sync_ver;
        int                     sysctl_sync_ports;
-       int                     sysctl_sync_qlen_max;
+       unsigned long           sysctl_sync_qlen_max;
        int                     sysctl_sync_sock_size;
        int                     sysctl_cache_bypass;
        int                     sysctl_expire_nodest_conn;
@@ -1085,7 +1085,7 @@ static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
        return ACCESS_ONCE(ipvs->sysctl_sync_ports);
 }
 
-static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
 {
        return ipvs->sysctl_sync_qlen_max;
 }
@@ -1138,7 +1138,7 @@ static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
        return 1;
 }
 
-static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
 {
        return IPVS_SYNC_QLEN_MAX;
 }
index 0810aa57c78015ac35fd8ea9caaddb0c2ab791a3..5fe5649851713e7858a2d016988cbe175ea7eaa6 100644 (file)
@@ -260,6 +260,12 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 
 extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
 
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+                              struct icmp6hdr *thdr, int len);
+
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+                                     struct sock *sk, struct flowi6 *fl6);
+
 extern int                     ip6_ra_control(struct sock *sk, int sel);
 
 extern int                     ipv6_parse_hopopts(struct sk_buff *skb);
@@ -853,8 +859,8 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
 #endif
 
 #ifdef CONFIG_SYSCTL
-extern ctl_table ipv6_route_table_template[];
-extern ctl_table ipv6_icmp_table_template[];
+extern struct ctl_table ipv6_route_table_template[];
+extern struct ctl_table ipv6_icmp_table_template[];
 
 extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
 extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
new file mode 100644 (file)
index 0000000..5bf2b3a
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Low Latency Sockets
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Author: Eliezer Tamir
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ */
+
+#ifndef _LINUX_NET_LL_POLL_H
+#define _LINUX_NET_LL_POLL_H
+
+#include <linux/netdevice.h>
+#include <net/ip.h>
+
+#ifdef CONFIG_NET_LL_RX_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_ll_read __read_mostly;
+extern unsigned int sysctl_net_ll_poll __read_mostly;
+
+/* return values from ndo_ll_poll */
+#define LL_FLUSH_FAILED                -1
+#define LL_FLUSH_BUSY          -2
+
+/* we can use sched_clock() because we don't care much about precision
+ * we only care that the average is bounded
+ * we don't mind a ~2.5% imprecision so <<10 instead of *1000
+ * sk->sk_ll_usec is a u_int so this can't overflow
+ */
+static inline u64 ll_sk_end_time(struct sock *sk)
+{
+       return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + sched_clock();
+}
+
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline u64 ll_end_time(void)
+{
+       return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + sched_clock();
+}
+
+static inline bool sk_valid_ll(struct sock *sk)
+{
+       return sk->sk_ll_usec && sk->sk_napi_id &&
+              !need_resched() && !signal_pending(current);
+}
+
+static inline bool can_poll_ll(u64 end_time)
+{
+       return !time_after64(sched_clock(), end_time);
+}
+
+/* when used in sock_poll() nonblock is known at compile time to be true
+ * so the loop and end_time will be optimized out
+ */
+static inline bool sk_poll_ll(struct sock *sk, int nonblock)
+{
+       u64 end_time = nonblock ? 0 : ll_sk_end_time(sk);
+       const struct net_device_ops *ops;
+       struct napi_struct *napi;
+       int rc = false;
+
+       /*
+        * rcu read lock for napi hash
+        * bh so we don't race with net_rx_action
+        */
+       rcu_read_lock_bh();
+
+       napi = napi_by_id(sk->sk_napi_id);
+       if (!napi)
+               goto out;
+
+       ops = napi->dev->netdev_ops;
+       if (!ops->ndo_ll_poll)
+               goto out;
+
+       do {
+               rc = ops->ndo_ll_poll(napi);
+
+               if (rc == LL_FLUSH_FAILED)
+                       break; /* permanent failure */
+
+               if (rc > 0)
+                       /* local bh are disabled so it is ok to use _BH */
+                       NET_ADD_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+
+       } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
+                can_poll_ll(end_time));
+
+       rc = !skb_queue_empty(&sk->sk_receive_queue);
+out:
+       rcu_read_unlock_bh();
+       return rc;
+}
+
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
+{
+       skb->napi_id = napi->napi_id;
+}
+
+/* used in the protocol handler to propagate the napi_id to the socket */
+static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
+{
+       sk->sk_napi_id = skb->napi_id;
+}
+
+#else /* CONFIG_NET_LL_RX_POLL */
+
+static inline u64 ll_sk_end_time(struct sock *sk)
+{
+       return 0;
+}
+
+static inline u64 ll_end_time(void)
+{
+       return 0;
+}
+
+static inline bool sk_valid_ll(struct sock *sk)
+{
+       return false;
+}
+
+static inline bool sk_poll_ll(struct sock *sk, int nonblock)
+{
+       return false;
+}
+
+static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
+{
+}
+
+static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static inline bool can_poll_ll(u64 end_time)
+{
+       return false;
+}
+
+#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* _LINUX_NET_LL_POLL_H */
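
On the driver side, taking part in low latency polling only requires tagging each
received skb with the NAPI context it arrived on; the protocol handler then copies
the id onto the socket with sk_mark_ll(). A minimal sketch of a NAPI poll routine;
demo_fetch_rx_skb() stands in for the driver's own ring dequeue and is not a real
API:

#include <net/ll_poll.h>
#include <linux/netdevice.h>

struct sk_buff *demo_fetch_rx_skb(struct napi_struct *napi);

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
        struct sk_buff *skb;
        int done = 0;

        while (done < budget && (skb = demo_fetch_rx_skb(napi)) != NULL) {
                skb_mark_ll(skb, napi); /* record napi->napi_id on the skb */
                napi_gro_receive(napi, skb);
                done++;
        }
        if (done < budget)
                napi_complete(napi);
        return done;
}
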
index 745bf741e029c50d8a464374ec5cc4bae855c655..949d77528f2f9cd87839029b424bded13cb52136 100644 (file)
@@ -230,7 +230,7 @@ extern int                  ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
                                                           void __user *buffer,
                                                           size_t *lenp,
                                                           loff_t *ppos);
-int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl,
+int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
                                 void __user *oldval, size_t __user *oldlenp,
                                 void __user *newval, size_t newlen);
 #endif
index b176978274828206b784e7003e04c871bef582a3..495bc57f292caa6deba113f7fd6ffb5cfd8e3668 100644 (file)
@@ -118,6 +118,7 @@ struct net {
        struct netns_ipvs       *ipvs;
        struct sock             *diag_nlsk;
        atomic_t                rt_genid;
+       atomic_t                fnhe_genid;
 };
 
 /*
@@ -340,4 +341,14 @@ static inline void rt_genid_bump(struct net *net)
        atomic_inc(&net->rt_genid);
 }
 
+static inline int fnhe_genid(struct net *net)
+{
+       return atomic_read(&net->fnhe_genid);
+}
+
+static inline void fnhe_genid_bump(struct net *net)
+{
+       atomic_inc(&net->fnhe_genid);
+}
+
 #endif /* __NET_NET_NAMESPACE_H */
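
fnhe_genid works like the existing rt_genid: a per-namespace counter that is bumped
to invalidate cached next-hop exceptions, which then compare their stored generation
lazily. A minimal sketch of such a check (the helper is illustrative; fnhe_genid is
the field added to struct fib_nh_exception above):

#include <net/net_namespace.h>
#include <net/ip_fib.h>

static bool demo_fnhe_is_stale(struct net *net,
                               const struct fib_nh_exception *fnhe)
{
        /* exceptions created before the last bump are considered stale */
        return fnhe->fnhe_genid != fnhe_genid(net);
}
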
index 31f1fb9eb78478a6db6339065b69b280adb26916..99eac12d040ba375e0b1d8712d9a4b3005da5ee5 100644 (file)
@@ -30,7 +30,8 @@ struct nf_loginfo {
        } u;
 };
 
-typedef void nf_logfn(u_int8_t pf,
+typedef void nf_logfn(struct net *net,
+                     u_int8_t pf,
                      unsigned int hooknum,
                      const struct sk_buff *skb,
                      const struct net_device *in,
index e2dec42c2db2b5c07afc98165b140b76aa599f56..5ca3f14f0998e9962385821409d32f0ac66dbb43 100644 (file)
@@ -2,7 +2,8 @@
 #define _KER_NFNETLINK_LOG_H
 
 void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+                 u_int8_t pf,
                  unsigned int hooknum,
                  const struct sk_buff *skb,
                  const struct net_device *in,
index 5a2978d1cb22ae8713d43688b74370e44727725e..495c71f66e7e9334b3506174dc7e5352399938f4 100644 (file)
@@ -6,7 +6,7 @@ struct xt_rateest {
        struct gnet_stats_basic_packed  bstats;
        spinlock_t                      lock;
        /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
-       struct gnet_stats_rate_est      rstats;
+       struct gnet_stats_rate_est64    rstats;
 
        /* following fields not accessed in hot path */
        struct hlist_node               list;
index c24060ee411e9e15a91a0fc5bd189eef8425869b..02fe40f8c8fda4a1351cf8079b083d5220234274 100644 (file)
@@ -15,5 +15,11 @@ struct netns_xt {
        struct ebt_table *frame_filter;
        struct ebt_table *frame_nat;
 #endif
+#if IS_ENABLED(CONFIG_IP_NF_TARGET_ULOG)
+       bool ulog_warn_deprecated;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_EBT_ULOG)
+       bool ebt_ulog_warn_deprecated;
+#endif
 };
 #endif
index 682b5ae9af5165dcc71b4f366c81db7a4df20c01..5db0224b73ac47b1513ffb9361e48aa0167bb24d 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _PING_H
 #define _PING_H
 
+#include <net/icmp.h>
 #include <net/netns/hash.h>
 
 /* PING_HTABLE_SIZE must be power of 2 */
  */
 #define GID_T_MAX (((gid_t)~0U) >> 1)
 
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+struct pingv6_ops {
+       int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
+       int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
+                                    struct sk_buff *skb);
+       int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+       void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
+                               __be16 port, u32 info, u8 *payload);
+       int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr,
+                            const struct net_device *dev, int strict);
+};
+
 struct ping_table {
        struct hlist_nulls_head hash[PING_HTABLE_SIZE];
        rwlock_t                lock;
@@ -36,20 +49,66 @@ struct ping_table {
 struct ping_iter_state {
        struct seq_net_private  p;
        int                     bucket;
+       sa_family_t             family;
 };
 
 extern struct proto ping_prot;
+extern struct ping_table ping_table;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct pingv6_ops pingv6_ops;
+#endif
+
+struct pingfakehdr {
+       struct icmphdr icmph;
+       struct iovec *iov;
+       sa_family_t family;
+       __wsum wcheck;
+};
+
+int  ping_get_port(struct sock *sk, unsigned short ident);
+void ping_hash(struct sock *sk);
+void ping_unhash(struct sock *sk);
 
+int  ping_init_sock(struct sock *sk);
+void ping_close(struct sock *sk, long timeout);
+int  ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void ping_err(struct sk_buff *skb, int offset, u32 info);
+int  ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
+                 struct sk_buff *);
 
-extern void ping_rcv(struct sk_buff *);
-extern void ping_err(struct sk_buff *, u32 info);
+int  ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                 size_t len, int noblock, int flags, int *addr_len);
+int  ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+                        void *user_icmph, size_t icmph_len);
+int  ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                    size_t len);
+int  ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                    size_t len);
+int  ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void ping_rcv(struct sk_buff *skb);
 
 #ifdef CONFIG_PROC_FS
+struct ping_seq_afinfo {
+       char                            *name;
+       sa_family_t                     family;
+       const struct file_operations    *seq_fops;
+       const struct seq_operations     seq_ops;
+};
+
+extern const struct file_operations ping_seq_fops;
+
+void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family);
+void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+void ping_seq_stop(struct seq_file *seq, void *v);
+int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
+void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
+
 extern int __init ping_proc_init(void);
 extern void ping_proc_exit(void);
 #endif
 
 void __init ping_init(void);
-
+int  __init pingv6_init(void);
+void pingv6_exit(void);
 
 #endif /* _PING_H */
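
The pingv6_ops table exists so the shared ping code, which is always built in, can
reach IPv6 helpers even when IPv6 is compiled as a module; the ipv6 module fills in
the pointers at load time. A minimal sketch of branching through it on the
receive-error path (the wrapper is illustrative):

#include <net/ping.h>
#include <net/ip.h>
#include <net/sock.h>

static int demo_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
                return pingv6_ops.ipv6_recv_error(sk, msg, len);
#endif
        return ip_recv_error(sk, msg, len);
}
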
index f10818fc8804ba85ca8c2e372b5145b18ae689a9..6eab63363e59fe82dc3fc6119633e5583b663dd1 100644 (file)
@@ -58,14 +58,12 @@ struct Qdisc {
                                      * multiqueue device.
                                      */
 #define TCQ_F_WARN_NONWC       (1 << 16)
-       int                     padded;
+       u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct list_head        list;
        u32                     handle;
        u32                     parent;
-       atomic_t                refcnt;
-       struct gnet_stats_rate_est      rate_est;
        int                     (*reshape_fail)(struct sk_buff *skb,
                                        struct Qdisc *q);
 
@@ -76,8 +74,9 @@ struct Qdisc {
         */
        struct Qdisc            *__parent;
        struct netdev_queue     *dev_queue;
-       struct Qdisc            *next_sched;
 
+       struct gnet_stats_rate_est64    rate_est;
+       struct Qdisc            *next_sched;
        struct sk_buff          *gso_skb;
        /*
         * For performance sake on SMP, we put highly modified fields at the end
@@ -88,8 +87,10 @@ struct Qdisc {
        unsigned int            __state;
        struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
-       spinlock_t              busylock;
-       u32                     limit;
+       int                     padded;
+       atomic_t                refcnt;
+
+       spinlock_t              busylock ____cacheline_aligned_in_smp;
 };
 
 static inline bool qdisc_is_running(const struct Qdisc *qdisc)
@@ -679,22 +680,26 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 #endif
 
 struct psched_ratecfg {
-       u64 rate_bps;
-       u32 mult;
-       u32 shift;
+       u64     rate_bytes_ps; /* bytes per second */
+       u32     mult;
+       u16     overhead;
+       u8      shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
 {
-       return ((u64)len * r->mult) >> r->shift;
+       return ((u64)(len + r->overhead) * r->mult) >> r->shift;
 }
 
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);
+extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
 
-static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r)
+static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
+                                         const struct psched_ratecfg *r)
 {
-       return r->rate_bps >> 3;
+       memset(res, 0, sizeof(*res));
+       res->rate = r->rate_bytes_ps;
+       res->overhead = r->overhead;
 }
 
 #endif
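
psched_l2t_ns() now folds the configured per-packet overhead into the length before
applying the precomputed multiplier and shift, so schedulers no longer add it
themselves. A minimal sketch (the wrapper is illustrative; conf->rate is in bytes per
second):

#include <net/sch_generic.h>
#include <linux/pkt_sched.h>

static u64 demo_transmit_time_ns(const struct tc_ratespec *conf,
                                 unsigned int pkt_len)
{
        struct psched_ratecfg rate;

        psched_ratecfg_precompute(&rate, conf); /* captures rate and overhead */
        /* time on the wire for (pkt_len + conf->overhead) bytes, in ns */
        return psched_l2t_ns(&rate, pkt_len);
}
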
index cd89510eab2a44d405c2cba2817b3b7ae863306e..e6b95bc4d8e6b7ea527906b35384733f8681191d 100644 (file)
 #define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
 #endif
 
-
-/* Certain internal static functions need to be exported when
- * compiled into the test frame.
- */
-#ifndef SCTP_STATIC
-#define SCTP_STATIC static
-#endif
-
 /*
  * Function declarations.
  */
@@ -196,11 +188,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
  *  Section:  Macros, externs, and inlines
  */
 
-
-#ifdef TEST_FRAME
-#include <test_frame.h>
-#else
-
 /* spin lock wrappers. */
 #define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
 #define sctp_spin_unlock_irqrestore(lock, flags)  \
@@ -226,8 +213,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
 #define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
 #define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
-#endif /* !TEST_FRAME */
-
 /* sctp mib definitions */
 enum {
        SCTP_MIB_NUM = 0,
@@ -575,27 +560,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
 /* Round an int up to the next multiple of 4.  */
 #define WORD_ROUND(s) (((s)+3)&~3)
 
-/* Make a new instance of type.  */
-#define t_new(type, flags)     kzalloc(sizeof(type), flags)
-
-/* Compare two timevals.  */
-#define tv_lt(s, t) \
-   (s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec))
-
-/* Add tv1 to tv2. */
-#define TIMEVAL_ADD(tv1, tv2) \
-({ \
-        suseconds_t usecs = (tv2).tv_usec + (tv1).tv_usec; \
-        time_t secs = (tv2).tv_sec + (tv1).tv_sec; \
-\
-        if (usecs >= 1000000) { \
-                usecs -= 1000000; \
-                secs++; \
-        } \
-        (tv2).tv_sec = secs; \
-        (tv2).tv_usec = usecs; \
-})
-
 /* External references. */
 
 extern struct proto sctp_prot;
index 1bd4c4144fe8b7c360ec45900b1e35018d0c1662..e745c92a153241d6f261a646b40de71a82f2d98b 100644 (file)
@@ -54,7 +54,7 @@
 #ifndef __sctp_structs_h__
 #define __sctp_structs_h__
 
-#include <linux/time.h>                /* We get struct timespec.    */
+#include <linux/ktime.h>
 #include <linux/socket.h>      /* linux/in.h needs this!!    */
 #include <linux/in.h>          /* We get struct sockaddr_in. */
 #include <linux/in6.h>         /* We get struct in6_addr     */
@@ -284,7 +284,7 @@ struct sctp_cookie {
        __u32 peer_ttag;
 
        /* When does this cookie expire? */
-       struct timeval expiration;
+       ktime_t expiration;
 
        /* Number of inbound/outbound streams which are set
         * and negotiated during the INIT process.
@@ -1537,7 +1537,7 @@ struct sctp_association {
        sctp_state_t state;
 
        /* The cookie life I award for any cookie.  */
-       struct timeval cookie_life;
+       ktime_t cookie_life;
 
        /* Overall     : The overall association error count.
         * Error Count : [Clear this any time I get something.]
index 5c97b0fc5623aa4bb44db9082d6631ad0a27fafe..ea6206ccc8967421b7156e04a30c5d293ba2bcce 100644 (file)
@@ -229,6 +229,8 @@ struct cg_proto;
   *    @sk_omem_alloc: "o" is "option" or "other"
   *    @sk_wmem_queued: persistent queue size
   *    @sk_forward_alloc: space allocated forward
+  *    @sk_napi_id: id of the last napi context to receive data for sk
+  *    @sk_ll_usec: usecs to busypoll when there is no data
   *    @sk_allocation: allocation mode
   *    @sk_sndbuf: size of send buffer in bytes
   *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
@@ -324,6 +326,10 @@ struct sock {
        int                     sk_forward_alloc;
 #ifdef CONFIG_RPS
        __u32                   sk_rxhash;
+#endif
+#ifdef CONFIG_NET_LL_RX_POLL
+       unsigned int            sk_napi_id;
+       unsigned int            sk_ll_usec;
 #endif
        atomic_t                sk_drops;
        int                     sk_rcvbuf;
@@ -866,6 +872,18 @@ struct inet_hashinfo;
 struct raw_hashinfo;
 struct module;
 
+/*
+ * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
+ * nodes unmodified. Special care is taken when initializing the object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+       if (offsetof(struct sock, sk_node.next) != 0)
+               memset(sk, 0, offsetof(struct sock, sk_node.next));
+       memset(&sk->sk_node.pprev, 0,
+              size - offsetof(struct sock, sk_node.pprev));
+}
+
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
@@ -2029,18 +2047,21 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
                sock_wake_async(sk->sk_socket, how, band);
 }
 
-#define SOCK_MIN_SNDBUF 2048
-/*
- * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
- * sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak
+/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
+ * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
+ * Note: for send buffers, TCP works better if we can build two skbs at
+ * minimum.
  */
-#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))
+#define TCP_SKB_MIN_TRUESIZE   (2048 + sizeof(struct sk_buff))
+
+#define SOCK_MIN_SNDBUF                (TCP_SKB_MIN_TRUESIZE * 2)
+#define SOCK_MIN_RCVBUF                 TCP_SKB_MIN_TRUESIZE
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
                sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-               sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+               sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
        }
 }
 
index 5bba80fbd1d9d92738d115cbc870bf0612cb0305..d1980054ec75b92c716097c377478b02fdee3762 100644 (file)
@@ -61,9 +61,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
  */
 #define MAX_TCP_WINDOW         32767U
 
-/* Offer an initial receive window of 10 mss. */
-#define TCP_DEFAULT_INIT_RCVWND        10
-
 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
 #define TCP_MIN_MSS            88U
 
@@ -1047,6 +1044,8 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
        rx_opt->num_sacks = 0;
 }
 
+extern u32 tcp_default_init_rwnd(u32 mss);
+
 /* Determine a window scaling and initial window to offer. */
 extern void tcp_select_initial_window(int __space, __u32 mss,
                                      __u32 *rcv_wnd, __u32 *window_clamp,
@@ -1193,7 +1192,6 @@ static inline void tcp_mib_init(struct net *net)
 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
 {
        tp->lost_skb_hint = NULL;
-       tp->scoreboard_skb_hint = NULL;
 }
 
 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
@@ -1284,11 +1282,13 @@ static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 #define tcp_twsk_md5_key(twsk) NULL
 #endif
 
-extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
-extern void tcp_free_md5sig_pool(void);
+extern bool tcp_alloc_md5sig_pool(void);
 
 extern struct tcp_md5sig_pool  *tcp_get_md5sig_pool(void);
-extern void tcp_put_md5sig_pool(void);
+static inline void tcp_put_md5sig_pool(void)
+{
+       local_bh_enable();
+}
 
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
@@ -1319,9 +1319,9 @@ void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
 
 /* Fastopen key context */
 struct tcp_fastopen_context {
-       struct crypto_cipher __rcu      *tfm;
-       __u8                            key[TCP_FASTOPEN_KEY_LENGTH];
-       struct rcu_head                 rcu;
+       struct crypto_cipher    *tfm;
+       __u8                    key[TCP_FASTOPEN_KEY_LENGTH];
+       struct rcu_head         rcu;
 };
 
 /* write queue abstraction */
@@ -1540,15 +1540,14 @@ extern struct request_sock_ops tcp6_request_sock_ops;
 
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
-extern int tcp_v4_gso_send_check(struct sk_buff *skb);
 extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                                       netdev_features_t features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
-extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb);
 extern int tcp_gro_complete(struct sk_buff *skb);
-extern int tcp4_gro_complete(struct sk_buff *skb);
+
+extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
+                               __be32 daddr);
 
 #ifdef CONFIG_PROC_FS
 extern int tcp4_proc_init(void);
@@ -1583,6 +1582,8 @@ struct tcp_request_sock_ops {
 #endif
 };
 
+extern int tcpv4_offload_init(void);
+
 extern void tcp_v4_init(void);
 extern void tcp_init(void);
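
tcp_alloc_md5sig_pool() now only reports whether the per-cpu pools exist, and
tcp_put_md5sig_pool() reduces to re-enabling bottom halves, so the get/put pair is a
BH-safe borrow of the current CPU's pool. A minimal sketch of the pairing (the
wrapper and error value are illustrative):

#include <net/tcp.h>

static int demo_hash_tcp_header(const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        int err;

        hp = tcp_get_md5sig_pool();     /* disables BH, returns this cpu's pool */
        if (!hp)
                return -EINVAL;

        err = tcp_md5_hash_header(hp, th);
        tcp_put_md5sig_pool();          /* now just local_bh_enable() */
        return err;
}
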
 
index 938b7fd1120477213888f9c7e61f98039bea3e03..48660e50ae90fdeec269fb2a1e5202e408021c3d 100644 (file)
@@ -3,56 +3,57 @@
 
 #include <net/checksum.h>
 
-/*
- *     IPv6 transport protocols
- */
-
+/* IPv6 transport protocols */
 extern struct proto rawv6_prot;
 extern struct proto udpv6_prot;
 extern struct proto udplitev6_prot;
 extern struct proto tcpv6_prot;
+extern struct proto pingv6_prot;
 
 struct flowi6;
 
 /* extension headers */
-extern int                             ipv6_exthdrs_init(void);
-extern void                            ipv6_exthdrs_exit(void);
-extern int                             ipv6_frag_init(void);
-extern void                            ipv6_frag_exit(void);
+int ipv6_exthdrs_init(void);
+void ipv6_exthdrs_exit(void);
+int ipv6_frag_init(void);
+void ipv6_frag_exit(void);
 
 /* transport protocols */
-extern int                             rawv6_init(void);
-extern void                            rawv6_exit(void);
-extern int                             udpv6_init(void);
-extern void                            udpv6_exit(void);
-extern int                             udplitev6_init(void);
-extern void                            udplitev6_exit(void);
-extern int                             tcpv6_init(void);
-extern void                            tcpv6_exit(void);
-
-extern int                             udpv6_connect(struct sock *sk,
-                                                     struct sockaddr *uaddr,
-                                                     int addr_len);
-
-extern int                     ip6_datagram_recv_ctl(struct sock *sk,
-                                                     struct msghdr *msg,
-                                                     struct sk_buff *skb);
-
-extern int                     ip6_datagram_send_ctl(struct net *net,
-                                                     struct sock *sk,
-                                                     struct msghdr *msg,
-                                                     struct flowi6 *fl6,
-                                                     struct ipv6_txoptions *opt,
-                                                     int *hlimit, int *tclass,
-                                                     int *dontfrag);
-
-#define                LOOPBACK4_IPV6          cpu_to_be32(0x7f000006)
-
-/*
- *     address family specific functions
- */
+int pingv6_init(void);
+void pingv6_exit(void);
+int rawv6_init(void);
+void rawv6_exit(void);
+int udpv6_init(void);
+void udpv6_exit(void);
+int udplitev6_init(void);
+void udplitev6_exit(void);
+int tcpv6_init(void);
+void tcpv6_exit(void);
+
+int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                         struct sk_buff *skb);
+
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
+                         struct flowi6 *fl6, struct ipv6_txoptions *opt,
+                         int *hlimit, int *tclass, int *dontfrag);
+
+void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+                            __u16 srcp, __u16 destp, int bucket);
+
+#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+
+/* address family specific functions */
 extern const struct inet_connection_sock_af_ops ipv4_specific;
 
-extern void inet6_destroy_sock(struct sock *sk);
+void inet6_destroy_sock(struct sock *sk);
+
+#define IPV6_SEQ_DGRAM_HEADER                                         \
+       "  sl  "                                                       \
+       "local_address                         "                       \
+       "remote_address                        "                       \
+       "st tx_queue rx_queue tr tm->when retrnsmt"                    \
+       "   uid  timeout inode ref pointer drops\n"
 
 #endif
index 065f379c6503a489a901dfca0f26312911ada0f4..b30a71a51839a76a32a11ffeca64d01287b2bb9b 100644 (file)
@@ -187,6 +187,8 @@ extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int udp_disconnect(struct sock *sk, int flags);
 extern unsigned int udp_poll(struct file *file, struct socket *sock,
                             poll_table *wait);
+extern struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                             netdev_features_t features);
 extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int __user *optlen);
 extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
@@ -262,11 +264,10 @@ extern int udp4_proc_init(void);
 extern void udp4_proc_exit(void);
 #endif
 
+extern int udpv4_offload_init(void);
+
 extern void udp_init(void);
 
-extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
-       netdev_features_t features);
 extern void udp_encap_enable(void);
 #if IS_ENABLED(CONFIG_IPV6)
 extern void udpv6_encap_enable(void);
index ae16531d0d353741ac66c83e0464341d060d5cce..94ce082b29dcdba2d726cbc90a0c4510fd38fb2b 100644 (file)
@@ -1160,6 +1160,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
        }
 }
 
+extern void xfrm_garbage_collect(struct net *net);
+
 #else
 
 static inline void xfrm_sk_free_policy(struct sock *sk) {}
@@ -1194,6 +1196,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
 {
        return 1;
 }
+static inline void xfrm_garbage_collect(struct net *net)
+{
+}
 #endif
 
 static __inline__
index d4609029f014e3dc9cd4592e6719bba470f4bb67..385c6329a96781fe42adc8772c3be3d3a2e65cec 100644 (file)
@@ -450,7 +450,8 @@ enum snd_soc_dapm_type {
        snd_soc_dapm_aif_in,            /* audio interface input */
        snd_soc_dapm_aif_out,           /* audio interface output */
        snd_soc_dapm_siggen,            /* signal generator */
-       snd_soc_dapm_dai,               /* link to DAI structure */
+       snd_soc_dapm_dai_in,            /* link to DAI structure */
+       snd_soc_dapm_dai_out,
        snd_soc_dapm_dai_link,          /* link between two DAI structures */
 };
 
index c4af592f7057e472d8953099c56651423d0ec6ad..4ea4f985f39409cf8e03c03972e5b923ca756fff 100644 (file)
@@ -463,7 +463,6 @@ struct se_cmd {
 #define CMD_T_ABORTED          (1 << 0)
 #define CMD_T_ACTIVE           (1 << 1)
 #define CMD_T_COMPLETE         (1 << 2)
-#define CMD_T_QUEUED           (1 << 3)
 #define CMD_T_SENT             (1 << 4)
 #define CMD_T_STOP             (1 << 5)
 #define CMD_T_FAILED           (1 << 6)
@@ -544,6 +543,7 @@ struct se_session {
        struct list_head        sess_list;
        struct list_head        sess_acl_list;
        struct list_head        sess_cmd_list;
+       struct list_head        sess_wait_list;
        spinlock_t              sess_cmd_lock;
        struct kref             sess_kref;
 };
@@ -572,12 +572,8 @@ struct se_dev_entry {
        bool                    def_pr_registered;
        /* See transport_lunflags_table */
        u32                     lun_flags;
-       u32                     deve_cmds;
        u32                     mapped_lun;
-       u32                     average_bytes;
-       u32                     last_byte_count;
        u32                     total_cmds;
-       u32                     total_bytes;
        u64                     pr_res_key;
        u64                     creation_time;
        u32                     attach_count;
index ba3471b73c07557d91411d9d6b3367e356962096..1dcce9cc99b9bb0321b0ad9419adadb41a1c1346 100644 (file)
@@ -114,7 +114,7 @@ sense_reason_t      transport_generic_new_cmd(struct se_cmd *);
 
 void   target_execute_cmd(struct se_cmd *cmd);
 
-void   transport_generic_free_cmd(struct se_cmd *, int);
+int    transport_generic_free_cmd(struct se_cmd *, int);
 
 bool   transport_wait_for_tasks(struct se_cmd *);
 int    transport_check_aborted_status(struct se_cmd *, int);
@@ -123,7 +123,7 @@ int transport_send_check_condition_and_sense(struct se_cmd *,
 int    target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 int    target_put_sess_cmd(struct se_session *, struct se_cmd *);
 void   target_sess_cmd_list_set_waiting(struct se_session *);
-void   target_wait_for_sess_cmds(struct se_session *, int);
+void   target_wait_for_sess_cmds(struct se_session *);
 
 int    core_alua_check_nonop_delay(struct se_cmd *);
 
index d0e686402df883d89e474a63374cdd1aa4c73ba6..8ee15b97cd389113859e614e6135a8e1eb6916b5 100644 (file)
@@ -2139,7 +2139,7 @@ TRACE_EVENT(ext4_es_remove_extent,
                  __entry->lblk, __entry->len)
 );
 
-TRACE_EVENT(ext4_es_find_delayed_extent_enter,
+TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
 
        TP_ARGS(inode, lblk),
@@ -2161,7 +2161,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_enter,
                  (unsigned long) __entry->ino, __entry->lblk)
 );
 
-TRACE_EVENT(ext4_es_find_delayed_extent_exit,
+TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
        TP_PROTO(struct inode *inode, struct extent_status *es),
 
        TP_ARGS(inode, es),
index 9ce7f44aebd2ab64a91b2f21b436a3377e54f045..4aee586979ca91bed3b125f536a0412cbc9a35ea 100644 (file)
@@ -30,6 +30,8 @@
 
 #define POLLFREE       0x4000  /* currently only for epoll */
 
+#define POLL_LL                0x8000
+
 struct pollfd {
        int fd;
        short events;
index c5d2e3a1cf68743862049482406f2a49adbfb707..ca3a20d772ac7f6f8c1afe4e0ff4f90bfe7e75bc 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
+#define SO_LL                  46
+
 #endif /* __ASM_GENERIC_SOCKET_H */
index 0c9b44871df07b3f7e09de4a2d922f421a4895b5..38dbafaa5341154167a685a245096fbece6a375d 100644 (file)
@@ -993,8 +993,8 @@ enum ethtool_sfeatures_retval_bits {
 #define PORT_OTHER             0xff
 
 /* Which transceiver to use. */
-#define XCVR_INTERNAL          0x00
-#define XCVR_EXTERNAL          0x01
+#define XCVR_INTERNAL          0x00 /* PHY and MAC are in the same package */
+#define XCVR_EXTERNAL          0x01 /* PHY and MAC are in different packages */
 #define XCVR_DUMMY1            0x02
 #define XCVR_DUMMY2            0x03
 #define XCVR_DUMMY3            0x04
index 552c8a0a12d15f11f6ac2b3b1a27d17a94044f8b..6487317ea619c41d4ea662f1162417fa6c6e947f 100644 (file)
@@ -9,6 +9,7 @@ enum {
        TCA_STATS_RATE_EST,
        TCA_STATS_QUEUE,
        TCA_STATS_APP,
+       TCA_STATS_RATE_EST64,
        __TCA_STATS_MAX,
 };
 #define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
@@ -37,6 +38,16 @@ struct gnet_stats_rate_est {
        __u32   pps;
 };
 
+/**
+ * struct gnet_stats_rate_est64 - rate estimator
+ * @bps: current byte rate
+ * @pps: current packet rate
+ */
+struct gnet_stats_rate_est64 {
+       __u64   bps;
+       __u64   pps;
+};
+
 /**
  * struct gnet_stats_queue - queuing statistics
  * @qlen: queue length
index 82c7d1bdadeb3d853ad4e33e51bc3fa8962c7d82..d7fea3496f323078739fc6c6148be125f79797c3 100644 (file)
@@ -93,6 +93,7 @@
 #define ARPHRD_PHONET_PIPE 821         /* PhoNet pipe header           */
 #define ARPHRD_CAIF    822             /* CAIF media type              */
 #define ARPHRD_IP6GRE  823             /* GRE over IPv6                */
+#define ARPHRD_NETLINK 824             /* Netlink header               */
 
 #define ARPHRD_VOID      0xFFFF        /* Void type, nothing is known */
 #define ARPHRD_NONE      0xFFFE        /* zero header length */
index b05823cae784219b2afd8ce5406cea66857e173a..03f6170ab3372f443c0989c7c5a503b00ef0c308 100644 (file)
@@ -221,6 +221,8 @@ enum {
        IFLA_BRPORT_GUARD,      /* bpdu guard              */
        IFLA_BRPORT_PROTECT,    /* root port protection    */
        IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave    */
+       IFLA_BRPORT_LEARNING,   /* mac learning */
+       IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -336,6 +338,7 @@ enum {
        IFLA_VF_VLAN,
        IFLA_VF_TX_RATE,        /* TX Bandwidth Allocation */
        IFLA_VF_SPOOFCHK,       /* Spoof Checking on/off switch */
+       IFLA_VF_LINK_STATE,     /* link state enable/disable/auto switch */
        __IFLA_VF_MAX,
 };
 
@@ -362,6 +365,18 @@ struct ifla_vf_spoofchk {
        __u32 setting;
 };
 
+enum {
+       IFLA_VF_LINK_STATE_AUTO,        /* link state of the uplink */
+       IFLA_VF_LINK_STATE_ENABLE,      /* link always up */
+       IFLA_VF_LINK_STATE_DISABLE,     /* link always down */
+       __IFLA_VF_LINK_STATE_MAX,
+};
+
+struct ifla_vf_link_state {
+       __u32 vf;
+       __u32 link_state;
+};
+
 /* VF ports management section
  *
  *     Nested layout of set/get msg is:
index 2835b85fd46d85ae47ec857caa93db1ee5ba79b9..82334f88967e9ba269d3e3fa121f563c33e64eed 100644 (file)
@@ -68,6 +68,8 @@
 #define IFF_MULTI_QUEUE 0x0100
 #define IFF_ATTACH_QUEUE 0x0200
 #define IFF_DETACH_QUEUE 0x0400
+/* read-only flag */
+#define IFF_PERSIST    0x0800
 
 /* Features for GSO (TUNSETOFFLOAD). */
 #define TUN_F_CSUM     0x01    /* You can hand me unchecksummed packets. */
index a5c86fc34a370f8480b7c1773b18d7b5bee6b940..d88c8ee00c8b7b39cc935c8353bc7f4f3eb3c8bf 100644 (file)
@@ -783,6 +783,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_IA64           0x3000000000000000ULL
 #define KVM_REG_ARM            0x4000000000000000ULL
 #define KVM_REG_S390           0x5000000000000000ULL
+#define KVM_REG_MIPS           0x7000000000000000ULL
 
 #define KVM_REG_SIZE_SHIFT     52
 #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
index 405918dd7b3f55f1ad2c3d9b2d290bd23bf33e9b..c55efaaa9bb4d889a6fdbee064a00b318f0043cc 100644 (file)
@@ -164,6 +164,7 @@ enum ovs_vport_type {
        OVS_VPORT_TYPE_UNSPEC,
        OVS_VPORT_TYPE_NETDEV,   /* network device */
        OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
+       OVS_VPORT_TYPE_GRE,      /* GRE tunnel. */
        __OVS_VPORT_TYPE_MAX
 };
 
@@ -192,7 +193,6 @@ enum ovs_vport_type {
  * optional; if not specified a free port number is automatically selected.
  * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type
  * of vport.
- * and other attributes are ignored.
  *
  * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to
  * look up the vport to operate on; otherwise dp_idx from the &struct
@@ -247,11 +247,29 @@ enum ovs_key_attr {
        OVS_KEY_ATTR_ARP,       /* struct ovs_key_arp */
        OVS_KEY_ATTR_ND,        /* struct ovs_key_nd */
        OVS_KEY_ATTR_SKB_MARK,  /* u32 skb mark */
+       OVS_KEY_ATTR_TUNNEL,    /* Nested set of ovs_tunnel attributes */
+
+#ifdef __KERNEL__
+       OVS_KEY_ATTR_IPV4_TUNNEL,  /* struct ovs_key_ipv4_tunnel */
+#endif
        __OVS_KEY_ATTR_MAX
 };
 
 #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
 
+enum ovs_tunnel_key_attr {
+       OVS_TUNNEL_KEY_ATTR_ID,                 /* be64 Tunnel ID */
+       OVS_TUNNEL_KEY_ATTR_IPV4_SRC,           /* be32 src IP address. */
+       OVS_TUNNEL_KEY_ATTR_IPV4_DST,           /* be32 dst IP address. */
+       OVS_TUNNEL_KEY_ATTR_TOS,                /* u8 Tunnel IP ToS. */
+       OVS_TUNNEL_KEY_ATTR_TTL,                /* u8 Tunnel IP TTL. */
+       OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT,      /* No argument, set DF. */
+       OVS_TUNNEL_KEY_ATTR_CSUM,               /* No argument. CSUM packet. */
+       __OVS_TUNNEL_KEY_ATTR_MAX
+};
+
+#define OVS_TUNNEL_KEY_ATTR_MAX (__OVS_TUNNEL_KEY_ATTR_MAX - 1)
+
 /**
  * enum ovs_frag_type - IPv4 and IPv6 fragment type
  * @OVS_FRAG_TYPE_NONE: Packet is not a fragment.
index 7a2144e1afae679a198449bad4e7b57a77a63fb0..eb0f1a554d7ba19c95b73315374c34757ba3ec97 100644 (file)
@@ -386,6 +386,8 @@ enum {
 #define RTAX_RTO_MIN RTAX_RTO_MIN
        RTAX_INITRWND,
 #define RTAX_INITRWND RTAX_INITRWND
+       RTAX_QUICKACK,
+#define RTAX_QUICKACK RTAX_QUICKACK
        __RTAX_MAX
 };
 
index df2e8b4f9c033f70f7115aeb1b7a846996f52a0a..af0a674cc677f570bf5d5e04683957605d5e9ea1 100644 (file)
@@ -253,6 +253,7 @@ enum
        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,    /* TCPFastOpenListenOverflow */
        LINUX_MIB_TCPFASTOPENCOOKIEREQD,        /* TCPFastOpenCookieReqd */
        LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
+       LINUX_MIB_LOWLATENCYRXPACKETS,          /* LowLatencyRxPackets */
        __LINUX_MIB_MAX
 };
 
@@ -287,6 +288,7 @@ enum
        LINUX_MIB_XFRMOUTPOLERROR,              /* XfrmOutPolError */
        LINUX_MIB_XFRMFWDHDRERROR,              /* XfrmFwdHdrError*/
        LINUX_MIB_XFRMOUTSTATEINVALID,          /* XfrmOutStateInvalid */
+       LINUX_MIB_XFRMACQUIREERROR,             /* XfrmAcquireError */
        __LINUX_MIB_XFRMMAX
 };
 
index f2d90091cc2098fd35893132cc45c0b6795c6319..852373d27dbb2bdd2016bf6a9d2ba00cd68e2954 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * include/linux/tipc.h: Include file for TIPC socket interface
+ * include/uapi/linux/tipc.h: Header for TIPC socket interface
  *
  * Copyright (c) 2003-2006, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
index 0b1e3f218a36b95ff1f8163e89a365ae9d705dc6..6b0bff09b3a7ced5dc7cf2c1a07dd4f82112b088 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * include/linux/tipc_config.h: Include file for TIPC configuration interface
+ * include/uapi/linux/tipc_config.h: Header for TIPC configuration interface
  *
  * Copyright (c) 2003-2006, Ericsson AB
  * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
index ee13ab6c361409b31216da2e54790f804401fe51..c312f16bc4e7e692f4117b0a9280cadf1977f8db 100644 (file)
@@ -39,7 +39,7 @@
 #define VIRTIO_CONSOLE_F_SIZE  0       /* Does host provide console size? */
 #define VIRTIO_CONSOLE_F_MULTIPORT 1   /* Does host provide multiple ports? */
 
-#define VIRTIO_CONSOLE_BAD_ID          (~(u32)0)
+#define VIRTIO_CONSOLE_BAD_ID          (~(__u32)0)
 
 struct virtio_console_config {
        /* columns of the screens */
index 62ca9a77c1d65dff76823857e895f61280893a1b..aeb4e9a0c5d1c1e4dbd484286983bc3eada936df 100644 (file)
@@ -748,6 +748,7 @@ struct omap_dss_driver {
 };
 
 enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
 
 int omap_dss_register_driver(struct omap_dss_driver *);
 void omap_dss_unregister_driver(struct omap_dss_driver *);
index 3ef3fe05ee99c55aa27d7c3eb5f828935b89ed59..eb262e3324d2fca44da8c88bf3127fd4da65b118 100644 (file)
  * that it cannot safely queue packets (as it may not be kicked to send them).
  */
 
+/*
+ * "feature-split-event-channels" is introduced to separate guest TX
+ * and RX notification. The backend either doesn't support this feature,
+ * or advertises it via xenstore as 0 (disabled) or 1 (enabled).
+ *
+ * To make use of this feature, the frontend should allocate two event
+ * channels for TX and RX and advertise them to the backend as
+ * "event-channel-tx" and "event-channel-rx" respectively. If the
+ * frontend doesn't want to use this feature, it just writes the
+ * "event-channel" node as before.
+ */
+
 /*
  * This is the 'wire' format for packets:
  *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags)
index 0a7515c1e3a424214158750aaf1262c05fac0d48..569c07f2e3446d57acb82b3430a1ceeb5f85fb95 100644 (file)
@@ -70,6 +70,7 @@ struct xenbus_device {
        struct device dev;
        enum xenbus_state state;
        struct completion down;
+       struct work_struct work;
 };
 
 static inline struct xenbus_device *to_xenbus_device(struct device *dev)
index 9d3a7887a6d3e0b7a8323865b658e1d088dcea85..2d9b83104dcf715197f1a84d34b892783ff79fe2 100644 (file)
@@ -431,6 +431,7 @@ choice
 config TREE_RCU
        bool "Tree-based hierarchical RCU"
        depends on !PREEMPT && SMP
+       select IRQ_WORK
        help
          This option selects the RCU implementation that is
          designed for very large SMP system with hundreds or
index a7e40ed8a07674fd0493c1495a012a09759d835b..70480a3aa69891b6ebc6c998e2202c3197a443ec 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -752,19 +752,29 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                        int otime, struct list_head *pt)
 {
        int i;
+       int progress;
 
-       if (sma->complex_count || sops == NULL) {
-               if (update_queue(sma, -1, pt))
+       progress = 1;
+retry_global:
+       if (sma->complex_count) {
+               if (update_queue(sma, -1, pt)) {
+                       progress = 1;
                        otime = 1;
+                       sops = NULL;
+               }
        }
+       if (!progress)
+               goto done;
 
        if (!sops) {
                /* No semops; something special is going on. */
                for (i = 0; i < sma->sem_nsems; i++) {
-                       if (update_queue(sma, i, pt))
+                       if (update_queue(sma, i, pt)) {
                                otime = 1;
+                               progress = 1;
+                       }
                }
-               goto done;
+               goto done_checkretry;
        }
 
        /* Check the semaphores that were modified. */
@@ -772,8 +782,15 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                if (sops[i].sem_op > 0 ||
                        (sops[i].sem_op < 0 &&
                                sma->sem_base[sops[i].sem_num].semval == 0))
-                       if (update_queue(sma, sops[i].sem_num, pt))
+                       if (update_queue(sma, sops[i].sem_num, pt)) {
                                otime = 1;
+                               progress = 1;
+                       }
+       }
+done_checkretry:
+       if (progress) {
+               progress = 0;
+               goto retry_global;
        }
 done:
        if (otime)
index 21c7fa615bd3107b0c28a4da499ea3ee7361d695..91e53d04b6a9e8841e697dcb290f1206468da21a 100644 (file)
@@ -1056,7 +1056,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
 static void wait_for_auditd(unsigned long sleep_time)
 {
        DECLARE_WAITQUEUE(wait, current);
-       set_current_state(TASK_INTERRUPTIBLE);
+       set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&audit_backlog_wait, &wait);
 
        if (audit_backlog_limit &&
index a291aa23fb3fa770756b05f7453be4c92531004c..43c307dc9453d5c9166596d2303deaf099cbf5b0 100644 (file)
@@ -658,6 +658,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
        struct vfsmount *mnt;
        int err;
 
+       rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
index 83a2970295d19ffdb1bd7417ed2b1b54a815cba1..6bd4a90d1991cdf84e5d14fa187af4a3b3f390f9 100644 (file)
@@ -1021,9 +1021,6 @@ static void audit_log_rule_change(char *action, struct audit_krule *rule, int re
  * @seq: netlink audit message sequence (serial) number
  * @data: payload data
  * @datasz: size of payload data
- * @loginuid: loginuid of sender
- * @sessionid: sessionid for netlink audit message
- * @sid: SE Linux Security ID of sender
  */
 int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
 {
index 2a9926275f806f41e7c15b6eed584acaf8796bc0..a7c9e6ddb9797a886e96bbff7c75fb8ed9670c68 100644 (file)
@@ -1686,11 +1686,14 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                 */
                cgroup_drop_root(opts.new_root);
 
-               if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) &&
-                   root->flags != opts.flags) {
-                       pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
-                       ret = -EINVAL;
-                       goto drop_new_super;
+               if (root->flags != opts.flags) {
+                       if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
+                               pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
+                               ret = -EINVAL;
+                               goto drop_new_super;
+                       } else {
+                               pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
+                       }
                }
 
                /* no subsys rebinding, so refcounts don't change */
@@ -2699,13 +2702,14 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
                goto out;
        }
 
+       cfe->type = (void *)cft;
+       cfe->dentry = dentry;
+       dentry->d_fsdata = cfe;
+       simple_xattrs_init(&cfe->xattrs);
+
        mode = cgroup_file_mode(cft);
        error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
        if (!error) {
-               cfe->type = (void *)cft;
-               cfe->dentry = dentry;
-               dentry->d_fsdata = cfe;
-               simple_xattrs_init(&cfe->xattrs);
                list_add_tail(&cfe->node, &parent->files);
                cfe = NULL;
        }
@@ -2953,11 +2957,8 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* if first iteration, pretend we just visited @cgroup */
-       if (!pos) {
-               if (list_empty(&cgroup->children))
-                       return NULL;
+       if (!pos)
                pos = cgroup;
-       }
 
        /* visit the first child if exists */
        next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
@@ -2965,14 +2966,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
                return next;
 
        /* no child, visit my or the closest ancestor's next sibling */
-       do {
+       while (pos != cgroup) {
                next = list_entry_rcu(pos->sibling.next, struct cgroup,
                                      sibling);
                if (&next->sibling != &pos->parent->children)
                        return next;
 
                pos = pos->parent;
-       } while (pos != cgroup);
+       }
 
        return NULL;
 }
index b5e4ab2d427e874404347c7e20186ed4cfd2f488..198a38883e64a0616437401ef2692f5b3c9029a5 100644 (file)
@@ -133,6 +133,27 @@ static void cpu_hotplug_done(void)
        mutex_unlock(&cpu_hotplug.lock);
 }
 
+/*
+ * Wait for currently running CPU hotplug operations to complete (if any) and
+ * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
+ * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
+ * hotplug path before performing hotplug operations. So acquiring that lock
+ * guarantees mutual exclusion from any currently running hotplug operations.
+ */
+void cpu_hotplug_disable(void)
+{
+       cpu_maps_update_begin();
+       cpu_hotplug_disabled = 1;
+       cpu_maps_update_done();
+}
+
+void cpu_hotplug_enable(void)
+{
+       cpu_maps_update_begin();
+       cpu_hotplug_disabled = 0;
+       cpu_maps_update_done();
+}
+
 #else /* #if CONFIG_HOTPLUG_CPU */
 static void cpu_hotplug_begin(void) {}
 static void cpu_hotplug_done(void) {}
@@ -540,36 +561,6 @@ static int __init alloc_frozen_cpus(void)
 }
 core_initcall(alloc_frozen_cpus);
 
-/*
- * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
- * hotplug when tasks are about to be frozen. Also, don't allow the freezer
- * to continue until any currently running CPU hotplug operation gets
- * completed.
- * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
- * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
- * CPU hotplug path and released only after it is complete. Thus, we
- * (and hence the freezer) will block here until any currently running CPU
- * hotplug operation gets completed.
- */
-void cpu_hotplug_disable_before_freeze(void)
-{
-       cpu_maps_update_begin();
-       cpu_hotplug_disabled = 1;
-       cpu_maps_update_done();
-}
-
-
-/*
- * When tasks have been thawed, re-enable regular CPU hotplug (which had been
- * disabled while beginning to freeze tasks).
- */
-void cpu_hotplug_enable_after_thaw(void)
-{
-       cpu_maps_update_begin();
-       cpu_hotplug_disabled = 0;
-       cpu_maps_update_done();
-}
-
 /*
  * When callbacks for CPU hotplug notifications are being executed, we must
  * ensure that the state of the system with respect to the tasks being frozen
@@ -589,12 +580,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
 
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
-               cpu_hotplug_disable_before_freeze();
+               cpu_hotplug_disable();
                break;
 
        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
-               cpu_hotplug_enable_after_thaw();
+               cpu_hotplug_enable();
                break;
 
        default:
index 8b86c0c68edf4458e894433a45ac8d5b80f673a0..d5585f5e038ee1080f17777792a6927d5897e5ea 100644 (file)
@@ -40,11 +40,13 @@ __setup("hlt", cpu_idle_nopoll_setup);
 
 static inline int cpu_idle_poll(void)
 {
+       rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+       rcu_idle_exit();
        return 1;
 }
 
index 6b41c1899a8b00acc0ca48ae30b0e8dfbdd2ad9d..9dc297faf7c01b68cbb1a7a24444b99c300cb8a3 100644 (file)
@@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event,
        perf_output_end(&handle);
 }
 
+typedef int  (perf_event_aux_match_cb)(struct perf_event *event, void *data);
+typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+
+static void
+perf_event_aux_ctx(struct perf_event_context *ctx,
+                  perf_event_aux_match_cb match,
+                  perf_event_aux_output_cb output,
+                  void *data)
+{
+       struct perf_event *event;
+
+       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+               if (event->state < PERF_EVENT_STATE_INACTIVE)
+                       continue;
+               if (!event_filter_match(event))
+                       continue;
+               if (match(event, data))
+                       output(event, data);
+       }
+}
+
+static void
+perf_event_aux(perf_event_aux_match_cb match,
+              perf_event_aux_output_cb output,
+              void *data,
+              struct perf_event_context *task_ctx)
+{
+       struct perf_cpu_context *cpuctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int ctxn;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->unique_pmu != pmu)
+                       goto next;
+               perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
+               if (task_ctx)
+                       goto next;
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx)
+                       perf_event_aux_ctx(ctx, match, output, data);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
+
+       if (task_ctx) {
+               preempt_disable();
+               perf_event_aux_ctx(task_ctx, match, output, data);
+               preempt_enable();
+       }
+       rcu_read_unlock();
+}
+
 /*
  * task tracking -- fork/exit
  *
@@ -4416,8 +4474,9 @@ struct perf_task_event {
 };
 
 static void perf_event_task_output(struct perf_event *event,
-                                    struct perf_task_event *task_event)
+                                  void *data)
 {
+       struct perf_task_event *task_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        struct task_struct *task = task_event->task;
@@ -4445,62 +4504,11 @@ out:
        task_event->event_id.header.size = size;
 }
 
-static int perf_event_task_match(struct perf_event *event)
-{
-       if (event->state < PERF_EVENT_STATE_INACTIVE)
-               return 0;
-
-       if (!event_filter_match(event))
-               return 0;
-
-       if (event->attr.comm || event->attr.mmap ||
-           event->attr.mmap_data || event->attr.task)
-               return 1;
-
-       return 0;
-}
-
-static void perf_event_task_ctx(struct perf_event_context *ctx,
-                                 struct perf_task_event *task_event)
+static int perf_event_task_match(struct perf_event *event,
+                                void *data __maybe_unused)
 {
-       struct perf_event *event;
-
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_task_match(event))
-                       perf_event_task_output(event, task_event);
-       }
-}
-
-static void perf_event_task_event(struct perf_task_event *task_event)
-{
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       struct pmu *pmu;
-       int ctxn;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_task_ctx(&cpuctx->ctx, task_event);
-
-               ctx = task_event->task_ctx;
-               if (!ctx) {
-                       ctxn = pmu->task_ctx_nr;
-                       if (ctxn < 0)
-                               goto next;
-                       ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-                       if (ctx)
-                               perf_event_task_ctx(ctx, task_event);
-               }
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
-       }
-       if (task_event->task_ctx)
-               perf_event_task_ctx(task_event->task_ctx, task_event);
-
-       rcu_read_unlock();
+       return event->attr.comm || event->attr.mmap ||
+              event->attr.mmap_data || event->attr.task;
 }
 
 static void perf_event_task(struct task_struct *task,
@@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task,
                },
        };
 
-       perf_event_task_event(&task_event);
+       perf_event_aux(perf_event_task_match,
+                      perf_event_task_output,
+                      &task_event,
+                      task_ctx);
 }
 
 void perf_event_fork(struct task_struct *task)
@@ -4557,8 +4568,9 @@ struct perf_comm_event {
 };
 
 static void perf_event_comm_output(struct perf_event *event,
-                                    struct perf_comm_event *comm_event)
+                                  void *data)
 {
+       struct perf_comm_event *comm_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = comm_event->event_id.header.size;
@@ -4585,39 +4597,16 @@ out:
        comm_event->event_id.header.size = size;
 }
 
-static int perf_event_comm_match(struct perf_event *event)
-{
-       if (event->state < PERF_EVENT_STATE_INACTIVE)
-               return 0;
-
-       if (!event_filter_match(event))
-               return 0;
-
-       if (event->attr.comm)
-               return 1;
-
-       return 0;
-}
-
-static void perf_event_comm_ctx(struct perf_event_context *ctx,
-                                 struct perf_comm_event *comm_event)
+static int perf_event_comm_match(struct perf_event *event,
+                                void *data __maybe_unused)
 {
-       struct perf_event *event;
-
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_comm_match(event))
-                       perf_event_comm_output(event, comm_event);
-       }
+       return event->attr.comm;
 }
 
 static void perf_event_comm_event(struct perf_comm_event *comm_event)
 {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
        char comm[TASK_COMM_LEN];
        unsigned int size;
-       struct pmu *pmu;
-       int ctxn;
 
        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        comm_event->comm_size = size;
 
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
-               ctxn = pmu->task_ctx_nr;
-               if (ctxn < 0)
-                       goto next;
-
-               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-               if (ctx)
-                       perf_event_comm_ctx(ctx, comm_event);
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
-       }
-       rcu_read_unlock();
+       perf_event_aux(perf_event_comm_match,
+                      perf_event_comm_output,
+                      comm_event,
+                      NULL);
 }
 
 void perf_event_comm(struct task_struct *task)
@@ -4706,8 +4682,9 @@ struct perf_mmap_event {
 };
 
 static void perf_event_mmap_output(struct perf_event *event,
-                                    struct perf_mmap_event *mmap_event)
+                                  void *data)
 {
+       struct perf_mmap_event *mmap_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
@@ -4734,46 +4711,24 @@ out:
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
-                                  struct perf_mmap_event *mmap_event,
-                                  int executable)
-{
-       if (event->state < PERF_EVENT_STATE_INACTIVE)
-               return 0;
-
-       if (!event_filter_match(event))
-               return 0;
-
-       if ((!executable && event->attr.mmap_data) ||
-           (executable && event->attr.mmap))
-               return 1;
-
-       return 0;
-}
-
-static void perf_event_mmap_ctx(struct perf_event_context *ctx,
-                                 struct perf_mmap_event *mmap_event,
-                                 int executable)
+                                void *data)
 {
-       struct perf_event *event;
+       struct perf_mmap_event *mmap_event = data;
+       struct vm_area_struct *vma = mmap_event->vma;
+       int executable = vma->vm_flags & VM_EXEC;
 
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_mmap_match(event, mmap_event, executable))
-                       perf_event_mmap_output(event, mmap_event);
-       }
+       return (!executable && event->attr.mmap_data) ||
+              (executable && event->attr.mmap);
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
        struct vm_area_struct *vma = mmap_event->vma;
        struct file *file = vma->vm_file;
        unsigned int size;
        char tmp[16];
        char *buf = NULL;
        const char *name;
-       struct pmu *pmu;
-       int ctxn;
 
        memset(tmp, 0, sizeof(tmp));
 
@@ -4829,27 +4784,10 @@ got_name:
 
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
-                                       vma->vm_flags & VM_EXEC);
-
-               ctxn = pmu->task_ctx_nr;
-               if (ctxn < 0)
-                       goto next;
-
-               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-               if (ctx) {
-                       perf_event_mmap_ctx(ctx, mmap_event,
-                                       vma->vm_flags & VM_EXEC);
-               }
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
-       }
-       rcu_read_unlock();
+       perf_event_aux(perf_event_mmap_match,
+                      perf_event_mmap_output,
+                      mmap_event,
+                      NULL);
 
        kfree(buf);
 }
index af2eb3cbd499305c3d418c7f8280c2d7f0f9437d..7bb73f9d09dbeedcc6c07f6a8dc8257f76fc4963 100644 (file)
@@ -649,7 +649,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
         *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */
        forget_original_parent(tsk);
-       exit_task_namespaces(tsk);
 
        write_lock_irq(&tasklist_lock);
        if (group_dead)
@@ -795,6 +794,7 @@ void do_exit(long code)
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
+       exit_task_namespaces(tsk);
        exit_task_work(tsk);
        check_stack_usage();
        exit_thread();
index 5a83dde8ca0c22544cead99b7ea3e2e0be813d27..54a4d5223238e15843559d16386bc4cdc1e63f25 100644 (file)
@@ -143,7 +143,10 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
  * irq_domain_add_simple() - Allocate and register a simple irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
  * @size: total number of irqs in mapping
- * @first_irq: first number of irq block assigned to the domain
+ * @first_irq: first irq number of the block assigned to the domain;
+ *     pass zero to assign irqs on-the-fly. This will result in a
+ *     linear IRQ domain so it is important to use irq_create_mapping()
+ *     for each used IRQ, especially when SPARSE_IRQ is enabled.
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -191,6 +194,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
        /* A linear domain is the default */
        return irq_domain_add_linear(of_node, size, ops, host_data);
 }
+EXPORT_SYMBOL_GPL(irq_domain_add_simple);
 
 /**
  * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
@@ -397,11 +401,12 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
        while (count--) {
                int irq = irq_base + count;
                struct irq_data *irq_data = irq_get_irq_data(irq);
-               irq_hw_number_t hwirq = irq_data->hwirq;
+               irq_hw_number_t hwirq;
 
                if (WARN_ON(!irq_data || irq_data->domain != domain))
                        continue;
 
+               hwirq = irq_data->hwirq;
                irq_set_status_flags(irq, IRQ_NOREQUEST);
 
                /* remove chip and handler */
index 1296e72e4161be305e37606f6024db1a84ff73a3..8241906c4b61a0887304f6524516563707d1a876 100644 (file)
@@ -569,6 +569,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
        int retval = 0;
 
        helper_lock();
+       if (!sub_info->path) {
+               retval = -EINVAL;
+               goto out;
+       }
+
        if (sub_info->path[0] == '\0')
                goto out;
 
index b049939177f6d3a4983341b42eb4719c27b4af62..cab4bce49c23dbe3779d02db8259dda28b7b4258 100644 (file)
@@ -2431,10 +2431,10 @@ static void kmemleak_load_module(const struct module *mod,
        kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
        for (i = 1; i < info->hdr->e_shnum; i++) {
-               const char *name = info->secstrings + info->sechdrs[i].sh_name;
-               if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
-                       continue;
-               if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
+               /* Scan all writable sections that are not executable */
+               if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
+                   !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
+                   (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
                        continue;
 
                kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
@@ -2769,24 +2769,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
        mod->trace_events = section_objs(info, "_ftrace_events",
                                         sizeof(*mod->trace_events),
                                         &mod->num_trace_events);
-       /*
-        * This section contains pointers to allocated objects in the trace
-        * code and not scanning it leads to false positives.
-        */
-       kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
-                          mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_TRACING
        mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
                                         sizeof(*mod->trace_bprintk_fmt_start),
                                         &mod->num_trace_bprintk_fmt);
-       /*
-        * This section contains pointers to allocated objects in the trace
-        * code and not scanning it leads to false positives.
-        */
-       kmemleak_scan_area(mod->trace_bprintk_fmt_start,
-                          sizeof(*mod->trace_bprintk_fmt_start) *
-                          mod->num_trace_bprintk_fmt, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
        /* sechdrs[0].sh_size is always zero */
index fa36e149442092f28dd339b03345f2a07fe91b31..8212c1aef125f2d4290cbe0e55564d7e9a5a57de 100644 (file)
@@ -363,6 +363,53 @@ static void log_store(int facility, int level,
        log_next_seq++;
 }
 
+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
+int dmesg_restrict = 1;
+#else
+int dmesg_restrict;
+#endif
+
+static int syslog_action_restricted(int type)
+{
+       if (dmesg_restrict)
+               return 1;
+       /*
+        * Unless restricted, we allow "read all" and "get buffer size"
+        * for everybody.
+        */
+       return type != SYSLOG_ACTION_READ_ALL &&
+              type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+       /*
+        * If this is from /proc/kmsg and we've already opened it, then we've
+        * already done the capabilities checks at open time.
+        */
+       if (from_file && type != SYSLOG_ACTION_OPEN)
+               return 0;
+
+       if (syslog_action_restricted(type)) {
+               if (capable(CAP_SYSLOG))
+                       return 0;
+               /*
+                * For historical reasons, accept CAP_SYS_ADMIN too, with
+                * a warning.
+                */
+               if (capable(CAP_SYS_ADMIN)) {
+                       pr_warn_once("%s (%d): Attempt to access syslog with "
+                                    "CAP_SYS_ADMIN but no CAP_SYSLOG "
+                                    "(deprecated).\n",
+                                current->comm, task_pid_nr(current));
+                       return 0;
+               }
+               return -EPERM;
+       }
+       return security_syslog(type);
+}
+
+
 /* /dev/kmsg - userspace message inject/listen interface */
 struct devkmsg_user {
        u64 seq;
@@ -620,7 +667,8 @@ static int devkmsg_open(struct inode *inode, struct file *file)
        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                return 0;
 
-       err = security_syslog(SYSLOG_ACTION_READ_ALL);
+       err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
+                                      SYSLOG_FROM_READER);
        if (err)
                return err;
 
@@ -813,45 +861,6 @@ static inline void boot_delay_msec(int level)
 }
 #endif
 
-#ifdef CONFIG_SECURITY_DMESG_RESTRICT
-int dmesg_restrict = 1;
-#else
-int dmesg_restrict;
-#endif
-
-static int syslog_action_restricted(int type)
-{
-       if (dmesg_restrict)
-               return 1;
-       /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
-       return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
-}
-
-static int check_syslog_permissions(int type, bool from_file)
-{
-       /*
-        * If this is from /proc/kmsg and we've already opened it, then we've
-        * already done the capabilities checks at open time.
-        */
-       if (from_file && type != SYSLOG_ACTION_OPEN)
-               return 0;
-
-       if (syslog_action_restricted(type)) {
-               if (capable(CAP_SYSLOG))
-                       return 0;
-               /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
-               if (capable(CAP_SYS_ADMIN)) {
-                       printk_once(KERN_WARNING "%s (%d): "
-                                "Attempt to access syslog with CAP_SYS_ADMIN "
-                                "but no CAP_SYSLOG (deprecated).\n",
-                                current->comm, task_pid_nr(current));
-                       return 0;
-               }
-               return -EPERM;
-       }
-       return 0;
-}
-
 #if defined(CONFIG_PRINTK_TIME)
 static bool printk_time = 1;
 #else
@@ -1249,7 +1258,7 @@ out:
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
 {
-       return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
+       return do_syslog(type, buf, len, SYSLOG_FROM_READER);
 }
 
 /*
index 071b0ab455cb39b81d78b362a86ff6ee86263a55..eb911dbce2679d29fea7226084be73d34a7336dd 100644 (file)
@@ -48,9 +48,11 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
                final_start = min(range[i].start, start);
                final_end = max(range[i].end, end);
 
-               range[i].start = final_start;
-               range[i].end =  final_end;
-               return nr_range;
+               /* clear it and add it back for further merge */
+               range[i].start = 0;
+               range[i].end =  0;
+               return add_range_with_merge(range, az, nr_range,
+                       final_start, final_end);
        }
 
        /* Need to add it: */
index 16ea67925015f19e7f693ed7a4557d898942beea..35380019f0fc101df423dab03d3b418ec2291eac 100644 (file)
@@ -1451,9 +1451,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
 #ifdef CONFIG_PROVE_RCU_DELAY
-               if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 &&
+               if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
                    system_state == SYSTEM_RUNNING)
-                       schedule_timeout_uninterruptible(2);
+                       udelay(200);
 #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                cond_resched();
        }
@@ -1613,6 +1613,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
        }
 }
 
+static void rsp_wakeup(struct irq_work *work)
+{
+       struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
+
+       /* Wake up rcu_gp_kthread() to start the grace period. */
+       wake_up(&rsp->gp_wq);
+}
+
 /*
  * Start a new RCU grace period if warranted, re-initializing the hierarchy
  * in preparation for detecting the next grace period.  The caller must hold
@@ -1637,8 +1645,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
        }
        rsp->gp_flags = RCU_GP_FLAG_INIT;
 
-       /* Wake up rcu_gp_kthread() to start the grace period. */
-       wake_up(&rsp->gp_wq);
+       /*
+        * We can't do wakeups while holding the rnp->lock, as that
+        * could cause deadlocks with the rq->lock. Defer
+        * the wakeup to interrupt context.
+        */
+       irq_work_queue(&rsp->wakeup_work);
 }
 
 /*
@@ -3235,6 +3247,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
        rsp->rda = rda;
        init_waitqueue_head(&rsp->gp_wq);
+       init_irq_work(&rsp->wakeup_work, rsp_wakeup);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
index da77a8f57ff95f80c7546684eba2ac293fdde08c..4df503470e420a24420edaf5d836d6aae508a155 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/irq_work.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -442,6 +443,7 @@ struct rcu_state {
        char *name;                             /* Name of structure. */
        char abbr;                              /* Abbreviated name. */
        struct list_head flavors;               /* List of RCU flavors. */
+       struct irq_work wakeup_work;            /* Postponed wakeups */
 };
 
 /* Values for rcu_state structure's gp_flags field. */
index 170814dc418f63a4753b1cfdae39d4d2dbff78cf..3db5a375d8dd52a93f64b48a5d0d7cad5a59559a 100644 (file)
@@ -88,7 +88,7 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_RCU_NOCB_CPU
 #ifndef CONFIG_RCU_NOCB_CPU_NONE
        if (!have_rcu_nocb_mask) {
-               alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+               zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
                have_rcu_nocb_mask = true;
        }
 #ifdef CONFIG_RCU_NOCB_CPU_ZERO
@@ -1667,7 +1667,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
        rdtp->last_accelerate = jiffies;
 
        /* Request timer delay depending on laziness, and round. */
-       if (rdtp->all_lazy) {
+       if (!rdtp->all_lazy) {
                *dj = round_up(rcu_idle_gp_delay + jiffies,
                               rcu_idle_gp_delay) - jiffies;
        } else {
index b5197dcb0dadb236ea81739bb78c703a01787404..3d6833f125d307214bae0210bd68cbaaa925755d 100644 (file)
@@ -195,8 +195,12 @@ void local_bh_enable_ip(unsigned long ip)
 EXPORT_SYMBOL(local_bh_enable_ip);
 
 /*
- * We restart softirq processing for at most 2 ms,
- * and if need_resched() is not set.
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+ * but break the loop if need_resched() is set or after 2 ms.
+ * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
+ * certain cases, such as stop_machine(), jiffies may cease to
+ * increment and so we need the MAX_SOFTIRQ_RESTART limit as
+ * well to make sure we eventually return from this method.
  *
  * These limits have been established via experimentation.
  * The two things to balance is latency against fairness -
@@ -204,6 +208,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  * should not be able to lock up the box.
  */
 #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
+#define MAX_SOFTIRQ_RESTART 10
 
 asmlinkage void __do_softirq(void)
 {
@@ -212,6 +217,7 @@ asmlinkage void __do_softirq(void)
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        int cpu;
        unsigned long old_flags = current->flags;
+       int max_restart = MAX_SOFTIRQ_RESTART;
 
        /*
         * Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -265,7 +271,8 @@ restart:
 
        pending = local_softirq_pending();
        if (pending) {
-               if (time_before(jiffies, end) && !need_resched())
+               if (time_before(jiffies, end) && !need_resched() &&
+                   --max_restart)
                        goto restart;
 
                wakeup_softirqd();
index b95d3c72ba211a44c955ce3ae58b07fa72980cb1..2bbd9a73b54c27b0e75eb651d931f0987ff870ed 100644 (file)
@@ -362,6 +362,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_reboot_notifier);
 
+/* Add backwards compatibility for stable trees. */
+#ifndef PF_NO_SETAFFINITY
+#define PF_NO_SETAFFINITY              PF_THREAD_BOUND
+#endif
+
+static void migrate_to_reboot_cpu(void)
+{
+       /* The boot cpu is always logical cpu 0 */
+       int cpu = 0;
+
+       cpu_hotplug_disable();
+
+       /* Make certain the cpu I'm about to reboot on is online */
+       if (!cpu_online(cpu))
+               cpu = cpumask_first(cpu_online_mask);
+
+       /* Prevent races with other tasks migrating this task */
+       current->flags |= PF_NO_SETAFFINITY;
+
+       /* Make certain I only run on the appropriate processor */
+       set_cpus_allowed_ptr(current, cpumask_of(cpu));
+}
+
 /**
  *     kernel_restart - reboot the system
  *     @cmd: pointer to buffer containing command to execute for restart
@@ -373,7 +396,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
 void kernel_restart(char *cmd)
 {
        kernel_restart_prepare(cmd);
-       disable_nonboot_cpus();
+       migrate_to_reboot_cpu();
        syscore_shutdown();
        if (!cmd)
                printk(KERN_EMERG "Restarting system.\n");
@@ -400,7 +423,7 @@ static void kernel_shutdown_prepare(enum system_states state)
 void kernel_halt(void)
 {
        kernel_shutdown_prepare(SYSTEM_HALT);
-       disable_nonboot_cpus();
+       migrate_to_reboot_cpu();
        syscore_shutdown();
        printk(KERN_EMERG "System halted.\n");
        kmsg_dump(KMSG_DUMP_HALT);
@@ -419,7 +442,7 @@ void kernel_power_off(void)
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
        if (pm_power_off_prepare)
                pm_power_off_prepare();
-       disable_nonboot_cpus();
+       migrate_to_reboot_cpu();
        syscore_shutdown();
        printk(KERN_EMERG "Power down.\n");
        kmsg_dump(KMSG_DUMP_POWEROFF);
index e4c07b0692bbba4a79676ffceed77324fd6087bf..70f27e89012b29d18b4aeb9c6e5c367d1b3955a5 100644 (file)
@@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
        bool
 
-# Platforms has a persistent clock
-config ALWAYS_USE_PERSISTENT_CLOCK
-       bool
-       default n
-
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
        bool
index 12ff13a838c652967d83212baf4742d71862720a..8f5b3b98577b797663f8057762b6248d0fae1823 100644 (file)
@@ -874,7 +874,6 @@ static void hardpps_update_phase(long error)
 void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 {
        struct pps_normtime pts_norm, freq_norm;
-       unsigned long flags;
 
        pts_norm = pps_normalize_ts(*phase_ts);
 
index 206bbfb34e091d21f5928a846f4e1a53ab984663..0c739423b0f9c5728de497e5391730e417536e61 100644 (file)
@@ -511,6 +511,12 @@ again:
                }
        }
 
+       /*
+        * Remove the current cpu from the pending mask. The event is
+        * delivered immediately in tick_do_broadcast() !
+        */
+       cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
+
        /* Take care of enforced broadcast requests */
        cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
        cpumask_clear(tick_broadcast_force_mask);
@@ -575,8 +581,8 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 
        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
-               WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
+                       WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
                        /*
                         * We only reprogram the broadcast timer if we
@@ -786,11 +792,11 @@ bool tick_broadcast_oneshot_available(void)
 
 void __init tick_broadcast_init(void)
 {
-       alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
-       alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
-       alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
-       alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
-       alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
 #endif
 }
index bc67d4245e1d130b523fd36482d50ebe4cbab763..f4208138fbf4cd1a72791f99c31adcbea9180e3e 100644 (file)
@@ -717,6 +717,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
        if (unlikely(!cpu_online(cpu))) {
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+               return false;
        }
 
        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -1168,7 +1169,7 @@ void tick_cancel_sched_timer(int cpu)
                hrtimer_cancel(&ts->sched_timer);
 # endif
 
-       ts->nohz_mode = NOHZ_MODE_INACTIVE;
+       memset(ts, 0, sizeof(*ts));
 }
 #endif
 
index 98cd470bbe4901569dad2a8ffb4a85f7945c5fca..baeeb5c87cf142a818122fac50140a0657f027bd 100644 (file)
@@ -975,6 +975,14 @@ static int timekeeping_suspend(void)
 
        read_persistent_clock(&timekeeping_suspend_time);
 
+       /*
+        * On some systems the persistent_clock cannot be detected at
+        * timekeeping_init by its return value, so if we see a valid
+        * value returned, update the persistent_clock_exists flag.
+        */
+       if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
+               persistent_clock_exist = true;
+
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        timekeeping_forward_now(tk);
index a860bba34412c0d8047ff4d81c54fb500fa5de9f..15ffdb3f1948b9468c2c04527beb8190b0d79d45 100644 (file)
@@ -1539,12 +1539,12 @@ static int __cpuinit init_timers_cpu(int cpu)
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
+               spin_lock_init(&base->lock);
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }
 
-       spin_lock_init(&base->lock);
 
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
index b549b0f5b9771624159d8dbbf1b6a103127a842f..6c508ff33c6206df8e028e1eab43e913565f927e 100644 (file)
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)                        \
-       op = rcu_dereference_raw(list);                 \
+       op = rcu_dereference_raw_notrace(list);                 \
        do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)                           \
-       while (likely(op = rcu_dereference_raw((op)->next)) &&  \
+       while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
        if (hlist_empty(hhd))
                return NULL;
 
-       hlist_for_each_entry_rcu(rec, hhd, node) {
+       hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
        hhd = &hash->buckets[key];
 
-       hlist_for_each_entry_rcu(entry, hhd, hlist) {
+       hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
        struct ftrace_hash *notrace_hash;
        int ret;
 
-       filter_hash = rcu_dereference_raw(ops->filter_hash);
-       notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+       filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+       notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
         * on the hash. rcu_read_lock is too dangerous here.
         */
        preempt_disable_notrace();
-       hlist_for_each_entry_rcu(entry, hhd, node) {
+       hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
index b59aea2c48c287f5de894efcba7d53c02fd6f279..e444ff88f0a425a00a4921291b46678b6a7efe21 100644 (file)
@@ -620,6 +620,9 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
+               if (!cpumask_test_cpu(cpu, buffer->cpumask))
+                       return -EINVAL;
+
                cpu_buffer = buffer->buffers[cpu];
                work = &cpu_buffer->irq_work;
        }
index ae6fa2d1cdf7dd75dd7e9aa4cfb66bcb32192315..e71a8be4a6ee9decd1429eb13159885f8c567469 100644 (file)
@@ -652,8 +652,6 @@ static struct {
        ARCH_TRACE_CLOCKS
 };
 
-int trace_clock_id;
-
 /*
  * trace_parser_get_init - gets the buffer for trace parser
  */
@@ -843,7 +841,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
-       max_data->uid = task_uid(tsk);
+       /*
+        * If tsk == current, then use current_uid(), as that does not use
+        * RCU. The irq tracer can be called out of RCU scope.
+        */
+       if (tsk == current)
+               max_data->uid = current_uid();
+       else
+               max_data->uid = task_uid(tsk);
+
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;
@@ -2818,7 +2824,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
                iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
-       if (trace_clocks[trace_clock_id].in_ns)
+       if (trace_clocks[tr->clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
        /* stop the trace while dumping if we are not opening "snapshot" */
@@ -3817,7 +3823,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                iter->iter_flags |= TRACE_FILE_LAT_FMT;
 
        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
-       if (trace_clocks[trace_clock_id].in_ns)
+       if (trace_clocks[tr->clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
        iter->cpu_file = tc->cpu;
@@ -5087,7 +5093,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
        cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "bytes: %ld\n", cnt);
 
-       if (trace_clocks[trace_clock_id].in_ns) {
+       if (trace_clocks[tr->clock_id].in_ns) {
                /* local or global for trace_clock */
                t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
                usec_rem = do_div(t, USEC_PER_SEC);
@@ -6216,10 +6222,15 @@ __init static int tracer_alloc_buffers(void)
 
        trace_init_cmdlines();
 
-       register_tracer(&nop_trace);
-
+       /*
+        * register_tracer() might reference current_trace, so it
+        * needs to be set before we register anything. This is
+        * just a bootstrap of current_trace anyway.
+        */
        global_trace.current_trace = &nop_trace;
 
+       register_tracer(&nop_trace);
+
        /* All seems OK, enable tracing */
        tracing_disabled = 0;
 
index 711ca7d3e7f18a7dd3b1de3506011934dae14765..20572ed88c5c3b791bef107aab7fbc1f3b0b44d1 100644 (file)
@@ -700,8 +700,6 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern unsigned long trace_flags;
 
-extern int trace_clock_id;
-
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
index 7a0cf68027ccf62118d4eb1da83ec00af3a0b49b..27963e2bf4bfe141d0884bbe9f05b0d74a7fcc47 100644 (file)
@@ -2072,8 +2072,10 @@ event_enable_func(struct ftrace_hash *hash,
  out_reg:
        /* Don't let event modules unload while probe registered */
        ret = try_module_get(file->event_call->mod);
-       if (!ret)
+       if (!ret) {
+               ret = -EBUSY;
                goto out_free;
+       }
 
        ret = __ftrace_event_enable_disable(file, 1, 1);
        if (ret < 0)
index a6361178de5ae4e17d0501db852a06ae80b06cf4..e1b653f7e1ca101f0861351d610b5c8297a4b3ca 100644 (file)
@@ -750,7 +750,11 @@ static int filter_set_pred(struct event_filter *filter,
 
 static void __free_preds(struct event_filter *filter)
 {
+       int i;
+
        if (filter->preds) {
+               for (i = 0; i < filter->n_preds; i++)
+                       kfree(filter->preds[i].ops);
                kfree(filter->preds);
                filter->preds = NULL;
        }
index 636d45fe69b37a80eefafe6b3aa6d46220f132dd..9f46e98ba8f22a712962ccc68e17b34f166439ba 100644 (file)
@@ -35,7 +35,7 @@ struct trace_probe {
        const char              *symbol;        /* symbol name */
        struct ftrace_event_class       class;
        struct ftrace_event_call        call;
-       struct ftrace_event_file        **files;
+       struct ftrace_event_file * __rcu *files;
        ssize_t                 size;           /* trace entry size */
        unsigned int            nr_args;
        struct probe_arg        args[];
@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event,
 
 static int trace_probe_nr_files(struct trace_probe *tp)
 {
-       struct ftrace_event_file **file = tp->files;
+       struct ftrace_event_file **file;
        int ret = 0;
 
+       /*
+        * Since all updates to tp->files are protected by probe_enable_lock,
+        * we don't need to take rcu_read_lock here.
+        */
+       file = rcu_dereference_raw(tp->files);
        if (file)
                while (*(file++))
                        ret++;
@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
        mutex_lock(&probe_enable_lock);
 
        if (file) {
-               struct ftrace_event_file **new, **old = tp->files;
+               struct ftrace_event_file **new, **old;
                int n = trace_probe_nr_files(tp);
 
+               old = rcu_dereference_raw(tp->files);
                /* 1 is for new one and 1 is for stopper */
                new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
                              GFP_KERNEL);
@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 static int
 trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
 {
+       struct ftrace_event_file **files;
        int i;
 
-       if (tp->files) {
-               for (i = 0; tp->files[i]; i++)
-                       if (tp->files[i] == file)
+       /*
+        * Since all updates to tp->files are protected by probe_enable_lock,
+        * we don't need to take rcu_read_lock here.
+        */
+       files = rcu_dereference_raw(tp->files);
+       if (files) {
+               for (i = 0; files[i]; i++)
+                       if (files[i] == file)
                                return i;
        }
 
@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
        mutex_lock(&probe_enable_lock);
 
        if (file) {
-               struct ftrace_event_file **new, **old = tp->files;
+               struct ftrace_event_file **new, **old;
                int n = trace_probe_nr_files(tp);
                int i, j;
 
+               old = rcu_dereference_raw(tp->files);
                if (n == 0 || trace_probe_file_index(tp, file) < 0) {
                        ret = -EINVAL;
                        goto out_unlock;
@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 static __kprobes void
 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-       struct ftrace_event_file **file = tp->files;
+       /*
+        * Note: preempt is already disabled around the kprobe handler.
+        * However, we still need an smp_read_barrier_depends() corresponding
+        * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+        */
+       struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+       if (unlikely(!file))
+               return;
 
-       /* Note: preempt is already disabled around the kprobe handler */
        while (*file) {
                __kprobe_trace_func(tp, regs, *file);
                file++;
@@ -925,9 +945,16 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
 {
-       struct ftrace_event_file **file = tp->files;
+       /*
+        * Note: preempt is already disabled around the kprobe handler.
+        * However, we still need an smp_read_barrier_depends() corresponding
+        * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+        */
+       struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+       if (unlikely(!file))
+               return;
 
-       /* Note: preempt is already disabled around the kprobe handler */
        while (*file) {
                __kretprobe_trace_func(tp, ri, regs, *file);
                file++;
@@ -935,7 +962,7 @@ kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 }
 
 /* Event entry printers */
-enum print_line_t
+static enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
 {
@@ -971,7 +998,7 @@ partial:
        return TRACE_TYPE_PARTIAL_LINE;
 }
 
-enum print_line_t
+static enum print_line_t
 print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
 {
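The trace_kprobe changes above publish tp->files with rcu_assign_pointer() and read it with rcu_dereference_raw(), relying on the probe handler already running with preemption disabled. A sketch of that access pattern (helper name hypothetical):

static void for_each_trace_file(struct trace_probe *tp,
				void (*fn)(struct ftrace_event_file *file))
{
	/*
	 * Preemption is already disabled around kprobe handlers, so a
	 * plain raw dereference is enough; the array is NULL-terminated.
	 */
	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);

	if (unlikely(!file))
		return;

	while (*file)
		fn(*file++);
}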
index 55e2cf66967be5ee7d3db82db47e5371e9816a1a..2901e3b8859066ed32a703143f0372089285109b 100644 (file)
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
-       ret = trace_test_buffer(tr, &count);
+       ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();
 
index 4aa9f5bc6b2dfdd9f152f5d20bf06c15f070aa21..ee8e29a2320c7c76d67df8f4816af52cd9da5f68 100644 (file)
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
 struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
 struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
        local_irq_restore(flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
 
 void delayed_work_timer_fn(unsigned long __data)
 {
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        local_irq_restore(flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
 
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
        if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
                spin_unlock_irq(&pool->lock);
                mutex_lock(&pool->manager_mutex);
+               spin_lock_irq(&pool->lock);
                ret = true;
        }
 
@@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void)
  * no synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
+ * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
+ * Note that both per-cpu and unbound workqueues may be associated with
+ * multiple pool_workqueues which have separate congested states.  A
+ * workqueue being congested on one CPU doesn't mean the workqueue is also
+ * congested on other CPUs / NUMA nodes.
+ *
  * RETURNS:
  * %true if congested, %false otherwise.
  */
@@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 
        rcu_read_lock_sched();
 
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = smp_processor_id();
+
        if (!(wq->flags & WQ_UNBOUND))
                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
        else
@@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void)
        BUG_ON(!tbl);
 
        for_each_node(node)
-               BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+               BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+                               node_online(node) ? node : NUMA_NO_NODE));
 
        for_each_possible_cpu(cpu) {
                node = cpu_to_node(cpu);
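With the workqueue_congested() hunk above, WORK_CPU_UNBOUND is resolved to the local CPU. A minimal caller-side sketch, advisory use only as the added comment stresses (helper name hypothetical):

static int try_queue_uncongested(struct workqueue_struct *wq,
				 struct work_struct *work)
{
	/* The result is a hint: congestion may change right after the test. */
	if (workqueue_congested(WORK_CPU_UNBOUND, wq))
		return -EBUSY;

	queue_work(wq, work);
	return 0;
}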
index e9c52e1b853a658c0b99580e092023d7f53a341e..c55a037a354eb9ffbd8f492f8e07c92e3aa0b941 100644 (file)
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-        gcd.o lcm.o list_sort.o uuid.o flex_array.o \
+        gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
diff --git a/lib/iovec.c b/lib/iovec.c
new file mode 100644 (file)
index 0000000..454baa8
--- /dev/null
@@ -0,0 +1,53 @@
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/uio.h>
+
+/*
+ *     Copy iovec to kernel. Returns -EFAULT on error.
+ *
+ *     Note: this modifies the original iovec.
+ */
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
+{
+       while (len > 0) {
+               if (iov->iov_len) {
+                       int copy = min_t(unsigned int, len, iov->iov_len);
+                       if (copy_from_user(kdata, iov->iov_base, copy))
+                               return -EFAULT;
+                       len -= copy;
+                       kdata += copy;
+                       iov->iov_base += copy;
+                       iov->iov_len -= copy;
+               }
+               iov++;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovec);
+
+/*
+ *     Copy kernel to iovec. Returns -EFAULT on error.
+ *
+ *     Note: this modifies the original iovec.
+ */
+
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
+{
+       while (len > 0) {
+               if (iov->iov_len) {
+                       int copy = min_t(unsigned int, iov->iov_len, len);
+                       if (copy_to_user(iov->iov_base, kdata, copy))
+                               return -EFAULT;
+                       kdata += copy;
+                       len -= copy;
+                       iov->iov_len -= copy;
+                       iov->iov_base += copy;
+               }
+               iov++;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovec);
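A minimal usage sketch for the helpers in the new lib/iovec.c above (function and buffer names hypothetical). Note that both helpers consume the iovec as they copy:

/* Pull a fixed-size header out of a user-supplied iovec. */
static int pull_header(struct iovec *iov, void *hdr, int hdrlen)
{
	/* iov_base/iov_len are advanced, so the same bytes cannot be re-read. */
	return memcpy_fromiovec(hdr, iov, hdrlen);
}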
index 0874e41609a6fbf4d786f3b49aa76f85130893f7..358a368a2947057ef9d9309c9dd4fc893523d63d 100644 (file)
@@ -193,10 +193,10 @@ static void klist_release(struct kref *kref)
                if (waiter->node != n)
                        continue;
 
+               list_del(&waiter->list);
                waiter->woken = 1;
                mb();
                wake_up_process(waiter->process);
-               list_del(&waiter->list);
        }
        spin_unlock(&klist_remove_lock);
        knode_set_klist(n, NULL);
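The reorder above matters because the waiter structure lives on the waiting task's stack: once that task observes ->woken and returns, the structure is gone, so the unlink has to happen before the wakeup. A simplified sketch of the waiting side, assuming the klist_waiter layout used in this file:

static void wait_for_klist_removal(struct klist_waiter *waiter)
{
	/*
	 * Once this returns, the caller's on-stack waiter goes out of scope,
	 * which is why klist_release() above unlinks it from the wait list
	 * before calling wake_up_process().
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter->woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}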
index 095ab157a5215a800d01585bbb50b0d6b6a88a17..d411355f238e2088247f0cbf51631c77ae78b10e 100644 (file)
@@ -318,7 +318,8 @@ extern UDItype __udiv_qrnnd();
             "rM" ((USItype)(bh)), \
             "rM" ((USItype)(al)), \
             "rM" ((USItype)(bl)))
-#if defined(_PA_RISC1_1)
+#if 0 && defined(_PA_RISC1_1)
+/* xmpyu uses floating point register which is not allowed in Linux kernel. */
 #define umul_ppmm(wh, wl, u, v) \
 do { \
        union {UDItype __ll; \
@@ -337,7 +338,7 @@ do { \
 #define UMUL_TIME 40
 #define UDIV_TIME 80
 #endif
-#ifndef LONGLONG_STANDALONE
+#if 0 /* #ifndef LONGLONG_STANDALONE */
 #define udiv_qrnnd(q, r, n1, n0, d) \
 do { USItype __r; \
        (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
index 5f9c44cdf1f548f5440e6ee86473af3ab5248b32..4cc6442733f49577f29647984db9f310d06a21c4 100644 (file)
@@ -37,7 +37,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
        mpi_limb_t a;
        MPI val = NULL;
 
-       while (nbytes >= 0 && buffer[0] == 0) {
+       while (nbytes > 0 && buffer[0] == 0) {
                buffer++;
                nbytes--;
        }
index 538367ef137270b768481a42e2bf2ee504d0ada2..1b24bdcb3197492674b0d6d0c128f9928fb977b1 100644 (file)
@@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type)
                        return;
                frontswap_ops->invalidate_area(type);
                atomic_set(&sis->frontswap_pages, 0);
-               memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+               bitmap_zero(sis->frontswap_map, sis->max);
        }
        clear_bit(type, need_init);
 }
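Both this hunk and the swapon() change further below treat the frontswap map as a proper bitmap: maxpages bits need BITS_TO_LONGS(maxpages) longs, whereas the old maxpages / sizeof(long) byte count truncates and can fall short when maxpages is not a multiple of BITS_PER_LONG. A small sketch of the arithmetic (helper name hypothetical):

static unsigned long *alloc_frontswap_map(unsigned long maxpages)
{
	/* Rounds up to whole longs; never shorter than maxpages bits. */
	unsigned long *map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));

	if (map)
		bitmap_zero(map, maxpages);	/* clear exactly maxpages bits */
	return map;
}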
index 03a89a2f464bef283770e84ad7186a5cc0915924..362c329b83fe7441b4d2119c1e164a54c58fc860 100644 (file)
@@ -2325,7 +2325,12 @@ static void collapse_huge_page(struct mm_struct *mm,
                pte_unmap(pte);
                spin_lock(&mm->page_table_lock);
                BUG_ON(!pmd_none(*pmd));
-               set_pmd_at(mm, address, pmd, _pmd);
+               /*
+                * We can only use set_pmd_at when establishing
+                * hugepmds and never for establishing regular pmds that
+                * point to regular pagetables. Use pmd_populate for that.
+                */
+               pmd_populate(mm, pmd, pmd_pgtable(_pmd));
                spin_unlock(&mm->page_table_lock);
                anon_vma_unlock_write(vma->anon_vma);
                goto out;
index f8feeeca6686543713a1dada46759b60b2eae240..e2bfbf73a551d0747fbea1b69200dd773e36754e 100644 (file)
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (ptep) {
                entry = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_migration(entry))) {
-                       migration_entry_wait(mm, (pmd_t *)ptep, address);
+                       migration_entry_wait_huge(mm, ptep);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
index cb1c9dedf9b65c08a4a6d9d6f81ee01f2cb44c36..194721839cf5d303a0de2b4df611b700db895043 100644 (file)
@@ -1199,7 +1199,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 
                        mz = mem_cgroup_zoneinfo(root, nid, zid);
                        iter = &mz->reclaim_iter[reclaim->priority];
-                       last_visited = iter->last_visited;
                        if (prev && reclaim->generation != iter->generation) {
                                iter->last_visited = NULL;
                                goto out_unlock;
@@ -1218,13 +1217,12 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                         * is alive.
                         */
                        dead_count = atomic_read(&root->dead_count);
-                       smp_rmb();
-                       last_visited = iter->last_visited;
-                       if (last_visited) {
-                               if ((dead_count != iter->last_dead_count) ||
-                                       !css_tryget(&last_visited->css)) {
+                       if (dead_count == iter->last_dead_count) {
+                               smp_rmb();
+                               last_visited = iter->last_visited;
+                               if (last_visited &&
+                                   !css_tryget(&last_visited->css))
                                        last_visited = NULL;
-                               }
                        }
                }
 
@@ -3141,8 +3139,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
                        return -ENOMEM;
                }
 
-               INIT_WORK(&s->memcg_params->destroy,
-                               kmem_cache_destroy_work_func);
                s->memcg_params->is_root_cache = true;
 
                /*
@@ -4108,8 +4104,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
        if (mem_cgroup_disabled())
                return NULL;
 
-       VM_BUG_ON(PageSwapCache(page));
-
        if (PageTransHuge(page)) {
                nr_pages <<= compound_order(page);
                VM_BUG_ON(!PageTransHuge(page));
@@ -4205,6 +4199,18 @@ void mem_cgroup_uncharge_page(struct page *page)
        if (page_mapped(page))
                return;
        VM_BUG_ON(page->mapping && !PageAnon(page));
+       /*
+        * If the page is in swap cache, uncharge should be deferred
+        * to the swap path, which also properly accounts swap usage
+        * and handles memcg lifetime.
+        *
+        * Note that this check is not stable and reclaim may add the
+        * page to swap cache at any time after this.  However, if the
+        * page is not in swap cache by the time page->mapcount hits
+        * 0, there won't be any page table references to the swap
+        * slot, and reclaim will free it and not actually write the
+        * page to disk.
+        */
        if (PageSwapCache(page))
                return;
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
index 6dc1882fbd725c61badb4fd072418c7330001ce1..61a262b08e53efa2f69c1387e488bea6f87bb0f9 100644 (file)
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
        tlb->start      = -1UL;
        tlb->end        = 0;
        tlb->need_flush = 0;
-       tlb->fast_mode  = (num_possible_cpus() == 1);
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -244,9 +243,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_table_flush(tlb);
 #endif
 
-       if (tlb_fast_mode(tlb))
-               return;
-
        for (batch = &tlb->local; batch; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
@@ -288,11 +284,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
        VM_BUG_ON(!tlb->need_flush);
 
-       if (tlb_fast_mode(tlb)) {
-               free_page_and_swap_cache(page);
-               return 1; /* avoid calling tlb_flush_mmu() */
-       }
-
        batch = tlb->active;
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
index a221fac1f47d39aef56278758765c023a7981796..1ad92b46753edfe8d9f54fae81c5f3aa15b34110 100644 (file)
@@ -720,9 +720,12 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
        start = phys_start_pfn << PAGE_SHIFT;
        size = nr_pages * PAGE_SIZE;
        ret = release_mem_region_adjustable(&iomem_resource, start, size);
-       if (ret)
-               pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n",
-                               start, start + size - 1, ret);
+       if (ret) {
+               resource_size_t endres = start + size - 1;
+
+               pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
+                               &start, &endres, ret);
+       }
 
        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
index 27ed22579fd97a21171b952deb78316438e69297..6f0c24438bbaaf6ffdaa4f840ab8da37cf9e236a 100644 (file)
@@ -165,7 +165,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                pte = arch_make_huge_pte(pte, vma, new, 0);
        }
 #endif
-       flush_cache_page(vma, addr, pte_pfn(pte));
+       flush_dcache_page(new);
        set_pte_at(mm, addr, ptep, pte);
 
        if (PageHuge(new)) {
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-                               unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+                               spinlock_t *ptl)
 {
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
+       pte_t pte;
        swp_entry_t entry;
        struct page *page;
 
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;
@@ -236,6 +235,20 @@ out:
        pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+                               unsigned long address)
+{
+       spinlock_t *ptl = pte_lockptr(mm, pmd);
+       pte_t *ptep = pte_offset_map(pmd, address);
+       __migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+       spinlock_t *ptl = &(mm)->page_table_lock;
+       __migration_entry_wait(mm, pte, ptl);
+}
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
index be04122fb277acd6a43a2f020e184ed2065258df..6725ff183374280ac9a5dcc786cd42f91d3cf669 100644 (file)
@@ -40,48 +40,44 @@ void __mmu_notifier_release(struct mm_struct *mm)
        int id;
 
        /*
-        * srcu_read_lock() here will block synchronize_srcu() in
-        * mmu_notifier_unregister() until all registered
-        * ->release() callouts this function makes have
-        * returned.
+        * SRCU here will block mmu_notifier_unregister until
+        * ->release returns.
         */
        id = srcu_read_lock(&srcu);
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
+               /*
+                * If ->release runs before mmu_notifier_unregister it must be
+                * handled, as it's the only way for the driver to flush all
+                * existing sptes and stop the driver from establishing any more
+                * sptes before all the pages in the mm are freed.
+                */
+               if (mn->ops->release)
+                       mn->ops->release(mn, mm);
+       srcu_read_unlock(&srcu, id);
+
        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
                mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                                 struct mmu_notifier,
                                 hlist);
-
                /*
-                * Unlink.  This will prevent mmu_notifier_unregister()
-                * from also making the ->release() callout.
+                * We arrived before mmu_notifier_unregister so
+                * mmu_notifier_unregister will do nothing other than to wait
+                * for ->release to finish and for mmu_notifier_unregister to
+                * return.
                 */
                hlist_del_init_rcu(&mn->hlist);
-               spin_unlock(&mm->mmu_notifier_mm->lock);
-
-               /*
-                * Clear sptes. (see 'release' description in mmu_notifier.h)
-                */
-               if (mn->ops->release)
-                       mn->ops->release(mn, mm);
-
-               spin_lock(&mm->mmu_notifier_mm->lock);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
        /*
-        * All callouts to ->release() which we have done are complete.
-        * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
-        */
-       srcu_read_unlock(&srcu, id);
-
-       /*
-        * mmu_notifier_unregister() may have unlinked a notifier and may
-        * still be calling out to it.  Additionally, other notifiers
-        * may have been active via vmtruncate() et. al. Block here
-        * to ensure that all notifier callouts for this mm have been
-        * completed and the sptes are really cleaned up before returning
-        * to exit_mmap().
+        * synchronize_srcu here prevents mmu_notifier_release from returning to
+        * exit_mmap (which would proceed with freeing all pages in the mm)
+        * until the ->release method returns, if it was invoked by
+        * mmu_notifier_unregister.
+        *
+        * The mmu_notifier_mm can't go away from under us because one mm_count
+        * is held by exit_mmap.
         */
        synchronize_srcu(&srcu);
 }
@@ -292,31 +288,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 {
        BUG_ON(atomic_read(&mm->mm_count) <= 0);
 
-       spin_lock(&mm->mmu_notifier_mm->lock);
        if (!hlist_unhashed(&mn->hlist)) {
+               /*
+                * SRCU here will force exit_mmap to wait for ->release to
+                * finish before freeing the pages.
+                */
                int id;
 
+               id = srcu_read_lock(&srcu);
                /*
-                * Ensure we synchronize up with __mmu_notifier_release().
+                * exit_mmap will block in mmu_notifier_release to guarantee
+                * that ->release is called before freeing the pages.
                 */
-               id = srcu_read_lock(&srcu);
-
-               hlist_del_rcu(&mn->hlist);
-               spin_unlock(&mm->mmu_notifier_mm->lock);
-
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
+               srcu_read_unlock(&srcu, id);
 
+               spin_lock(&mm->mmu_notifier_mm->lock);
                /*
-                * Allow __mmu_notifier_release() to complete.
+                * Can not use list_del_rcu() since __mmu_notifier_release
+                * can delete it before we hold the lock.
                 */
-               srcu_read_unlock(&srcu, id);
-       } else
+               hlist_del_init_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);
+       }
 
        /*
-        * Wait for any running method to finish, including ->release() if it
-        * was run by __mmu_notifier_release() instead of us.
+        * Wait for any running method to finish, of course including
+        * ->release if it was run by mmu_notifier_release instead of us.
         */
        synchronize_srcu(&srcu);
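The rework above hinges on one SRCU pattern: every ->release() callout runs inside an SRCU read-side critical section, and both teardown paths end with synchronize_srcu(), so neither exit_mmap() nor mmu_notifier_unregister() can return while a callout is still in flight. A stripped-down sketch of that pattern (names follow this file; list locking omitted):

static void call_release_callouts(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		if (mn->ops->release)
			mn->ops->release(mn, mm);
	srcu_read_unlock(&srcu, id);

	/* Wait for any ->release() still running under the other path. */
	synchronize_srcu(&srcu);
}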
 
index 98cbdf6e553217a87b62daac80d26c026ecf60e3..c3edb624fccf30c303ccff94cf139e090b416d7e 100644 (file)
@@ -1628,6 +1628,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
        long min = mark;
        long lowmem_reserve = z->lowmem_reserve[classzone_idx];
        int o;
+       long free_cma = 0;
 
        free_pages -= (1 << order) - 1;
        if (alloc_flags & ALLOC_HIGH)
@@ -1637,9 +1638,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 #ifdef CONFIG_CMA
        /* If allocation can't use CMA areas don't use free CMA pages */
        if (!(alloc_flags & ALLOC_CMA))
-               free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+               free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
-       if (free_pages <= min + lowmem_reserve)
+
+       if (free_pages - free_cma <= min + lowmem_reserve)
                return false;
        for (o = 0; o < order; o++) {
                /* At the next order, this order's pages become unavailable */
@@ -5158,7 +5160,7 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
        for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
                if (poison)
                        memset((void *)pos, poison, PAGE_SIZE);
-               free_reserved_page(virt_to_page(pos));
+               free_reserved_page(virt_to_page((void *)pos));
        }
 
        if (pages && s)
index 35aa294656cd812d2773fede0d51179a8b7ee09a..5da2cbcfdbb56b0e9f4fe27d6e04137e59dfce3b 100644 (file)
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
        return 0;
 }
 
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       struct vm_area_struct *vma;
-
-       /* We don't need vma lookup at all. */
-       if (!walk->hugetlb_entry)
-               return NULL;
-
-       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
-       vma = find_vma(walk->mm, addr);
-       if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
-               return vma;
-
-       return NULL;
-}
-
 #else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       return NULL;
-}
-
 static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
        if (!walk->mm)
                return -EINVAL;
 
+       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
        pgd = pgd_offset(walk->mm, addr);
        do {
-               struct vm_area_struct *vma;
+               struct vm_area_struct *vma = NULL;
 
                next = pgd_addr_end(addr, end);
 
                /*
-                * handle hugetlb vma individually because pagetable walk for
-                * the hugetlb page is dependent on the architecture and
-                * we can't handled it in the same manner as non-huge pages.
+                * This function was not intended to be vma based.
+                * But there are vma special cases to be handled:
+                * - hugetlb vma's
+                * - VM_PFNMAP vma's
                 */
-               vma = hugetlb_vma(addr, walk);
+               vma = find_vma(walk->mm, addr);
                if (vma) {
-                       if (vma->vm_end < next)
+                       /*
+                        * There are no page structures backing a VM_PFNMAP
+                        * range, so do not allow split_huge_page_pmd().
+                        */
+                       if ((vma->vm_start <= addr) &&
+                           (vma->vm_flags & VM_PFNMAP)) {
                                next = vma->vm_end;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                        /*
-                        * Hugepage is very tightly coupled with vma, so
-                        * walk through hugetlb entries within a given vma.
+                        * Handle hugetlb vma individually because pagetable
+                        * walk for the hugetlb page is dependent on the
+                        * architecture and we can't handle it in the same
+                        * manner as non-huge pages.
                         */
-                       err = walk_hugetlb_range(vma, addr, next, walk);
-                       if (err)
-                               break;
-                       pgd = pgd_offset(walk->mm, next);
-                       continue;
+                       if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+                           is_vm_hugetlb_page(vma)) {
+                               if (vma->vm_end < next)
+                                       next = vma->vm_end;
+                               /*
+                                * Hugepage is very tightly coupled with vma,
+                                * so walk through hugetlb entries within a
+                                * given vma.
+                                */
+                               err = walk_hugetlb_range(vma, addr, next, walk);
+                               if (err)
+                                       break;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                }
 
                if (pgd_none_or_clear_bad(pgd)) {
index b3d40dcf36247975ac05b1f7a7119521e7cb65bb..f24ab0dff554262e1da6866b866a4584871f8d9c 100644 (file)
@@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
-               if (err == -EEXIST) {   /* seems racy */
+               if (err == -EEXIST) {
                        radix_tree_preload_end();
+                       /*
+                        * We might race against get_swap_page() and stumble
+                        * across a SWAP_HAS_CACHE swap_map entry whose page
+                        * has not been brought into the swapcache yet, while
+                        * the other end is scheduled away waiting on discard
+                        * I/O completion at scan_swap_map().
+                        *
+                        * In order to avoid turning this transitory state
+                        * into a permanent loop around this -EEXIST case
+                        * if !CONFIG_PREEMPT and the I/O completion happens
+                        * to be waiting on the CPU waitqueue where we are now
+                        * busy looping, we just conditionally invoke the
+                        * scheduler here, if there are some more important
+                        * tasks to run.
+                        */
+                       cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
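The long comment above boils down to a general rule for retry loops on transient states: yield between attempts so the task that has to clear the state can actually run on !CONFIG_PREEMPT kernels. A minimal sketch, with try_claim() as a hypothetical stand-in for swapcache_prepare():

static int claim_with_backoff(int (*try_claim)(void))
{
	int err;

	/* -EEXIST is transient here; give other runnable tasks a chance. */
	while ((err = try_claim()) == -EEXIST)
		cond_resched();

	return err;
}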
index 6c340d908b274c7855b3abd0bdeadff56c8ffc92..746af55b8455ce0e9280b81c78e0ee5f2316f5d4 100644 (file)
@@ -2116,7 +2116,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        }
        /* frontswap enabled? set up bit-per-page map for frontswap */
        if (frontswap_enabled)
-               frontswap_map = vzalloc(maxpages / sizeof(long));
+               frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
 
        if (p->bdev) {
                if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
index e085bcc754f602e4401e51c8c29442a1bd143de8..1eb05d80b07bea736e85a389be0d98c8bfcb3d9c 100644 (file)
@@ -871,10 +871,10 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
         */
        del_timer_sync(&app->join_timer);
 
-       spin_lock(&app->lock);
+       spin_lock_bh(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
        mrp_pdu_queue(app);
-       spin_unlock(&app->lock);
+       spin_unlock_bh(&app->lock);
 
        mrp_queue_xmit(app);
 
index 9424f3718ea703adc1019193a30352ad95981e6a..2fb2d88e8c2e329ec11653c20478bede1c4e5ad8 100644 (file)
@@ -341,7 +341,7 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event)
 static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                             void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct vlan_group *grp;
        struct vlan_info *vlan_info;
        int i, flgs;
index 8eb75425e6e660a6d087cb2a61890d96138b8106..addc116cecf0be16210498b6544fa40ad24cc615 100644 (file)
@@ -562,36 +562,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
 
        if (!p9_is_proto_dotl(c)) {
                /* Error is reported in string format */
-               uint16_t len;
-               /* 7 = header size for RERROR, 2 is the size of string len; */
-               int inline_len = in_hdrlen - (7 + 2);
+               int len;
+               /* 7 = header size for RERROR; */
+               int inline_len = in_hdrlen - 7;
 
-               /* Read the size of error string */
-               err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
-               if (err)
-                       goto out_err;
-
-               ename = kmalloc(len + 1, GFP_NOFS);
-               if (!ename) {
-                       err = -ENOMEM;
+               len =  req->rc->size - req->rc->offset;
+               if (len > (P9_ZC_HDR_SZ - 7)) {
+                       err = -EFAULT;
                        goto out_err;
                }
-               if (len <= inline_len) {
-                       /* We have error in protocol buffer itself */
-                       if (pdu_read(req->rc, ename, len)) {
-                               err = -EFAULT;
-                               goto out_free;
 
-                       }
-               } else {
-                       /*
-                        *  Part of the data is in user space buffer.
-                        */
-                       if (pdu_read(req->rc, ename, inline_len)) {
-                               err = -EFAULT;
-                               goto out_free;
-
-                       }
+               ename = &req->rc->sdata[req->rc->offset];
+               if (len > inline_len) {
+                       /* We have error in external buffer */
                        if (kern_buf) {
                                memcpy(ename + inline_len, uidata,
                                       len - inline_len);
@@ -600,19 +583,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
                                                     uidata, len - inline_len);
                                if (err) {
                                        err = -EFAULT;
-                                       goto out_free;
+                                       goto out_err;
                                }
                        }
                }
-               ename[len] = 0;
-               if (p9_is_proto_dotu(c)) {
-                       /* For dotu we also have error code */
-                       err = p9pdu_readf(req->rc,
-                                         c->proto_version, "d", &ecode);
-                       if (err)
-                               goto out_free;
+               ename = NULL;
+               err = p9pdu_readf(req->rc, c->proto_version, "s?d",
+                                 &ename, &ecode);
+               if (err)
+                       goto out_err;
+
+               if (p9_is_proto_dotu(c))
                        err = -ecode;
-               }
+
                if (!err || !IS_ERR_VALUE(err)) {
                        err = p9_errstr2errno(ename, strlen(ename));
 
@@ -628,8 +611,6 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
        }
        return err;
 
-out_free:
-       kfree(ename);
 out_err:
        p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
        return err;
index 2ddc9046868e7a19d06841054696773242099ccf..51da83943847be9ae8234836daec95f6e72a57d9 100644 (file)
@@ -218,6 +218,7 @@ source "net/batman-adv/Kconfig"
 source "net/openvswitch/Kconfig"
 source "net/vmw_vsock/Kconfig"
 source "net/netlink/Kconfig"
+source "net/mpls/Kconfig"
 
 config RPS
        boolean
@@ -242,6 +243,10 @@ config NETPRIO_CGROUP
          Cgroup subsystem for use in assigning processes to network priorities on
          a per-interface basis
 
+config NET_LL_RX_POLL
+       boolean
+       default y
+
 config BQL
        boolean
        depends on SYSFS
@@ -259,6 +264,18 @@ config BPF_JIT
          packet sniffing (libpcap/tcpdump). Note : Admin should enable
          this feature changing /proc/sys/net/core/bpf_jit_enable
 
+config NET_FLOW_LIMIT
+       boolean
+       depends on RPS
+       default y
+       ---help---
+         The network stack has to drop packets when a receive processing CPU's
+         backlog reaches netdev_max_backlog. If a few out of many active flows
+         generate the vast majority of load, drop their traffic earlier to
+         maintain capacity for the other flows. This feature provides servers
+         with many clients some protection against DoS by a single (spoofed)
+         flow that greatly exceeds average workload.
+
 menu "Network testing"
 
 config NET_PKTGEN
index 091e7b04f301539036dca468d045d0615a33f4b4..9492e8cb64e9e467412aaf25a4c677d84ac6ac62 100644 (file)
@@ -70,3 +70,4 @@ obj-$(CONFIG_BATMAN_ADV)      += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
 obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
 obj-$(CONFIG_VSOCKETS) += vmw_vsock/
+obj-$(CONFIG_NET_MPLS_GSO)     += mpls/
index 173a2e82f486b4ee19411b18842e61f57b05a58c..690356fa52b99a2d66a8522c448c51ba7eedba5f 100644 (file)
@@ -332,7 +332,7 @@ static void aarp_expire_timeout(unsigned long unused)
 static int aarp_device_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int ct;
 
        if (!net_eq(dev_net(dev), &init_net))
index ef12839a7cfe55b6ee99b2be667c0cd9fb829f79..7fee50d637f956240a6146b342e7723a131ce4bd 100644 (file)
@@ -644,7 +644,7 @@ static inline void atalk_dev_down(struct net_device *dev)
 static int ddp_device_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
index 8ae3a7879335f28cc082ea6c2a3bf8da2b8820a3..8215f7cb170b5a21cdb67347f50578f57e515dfb 100644 (file)
@@ -539,9 +539,9 @@ static int clip_create(int number)
 }
 
 static int clip_device_event(struct notifier_block *this, unsigned long event,
-                            void *arg)
+                            void *ptr)
 {
-       struct net_device *dev = arg;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
@@ -575,6 +575,7 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
                           void *ifa)
 {
        struct in_device *in_dev;
+       struct netdev_notifier_info info;
 
        in_dev = ((struct in_ifaddr *)ifa)->ifa_dev;
        /*
@@ -583,7 +584,8 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
         */
        if (event != NETDEV_UP)
                return NOTIFY_DONE;
-       return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
+       netdev_notifier_info_init(&info, in_dev->dev);
+       return clip_device_event(this, NETDEV_CHANGE, &info);
 }
 
 static struct notifier_block clip_dev_notifier = {
index d4cc1be5c36469ea2b24b7dbb9375e364e6986b0..3af12755cd04851147b8e4661fd312e29abd0691 100644 (file)
@@ -998,14 +998,12 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
 }
 
 static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
-                              unsigned long event, void *dev_ptr)
+                              unsigned long event, void *ptr)
 {
-       struct net_device *dev;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mpoa_client *mpc;
        struct lec_priv *priv;
 
-       dev = dev_ptr;
-
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
 
index e277e38f736b93a133a6cd43cc7b264acfbad641..4b4d2b779ec1a08202303863fea1b74c67f04383 100644 (file)
@@ -111,9 +111,9 @@ again:
  *     Handle device status changes.
  */
 static int ax25_device_event(struct notifier_block *this, unsigned long event,
-       void *ptr)
+                            void *ptr)
 {
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
@@ -1974,7 +1974,7 @@ static struct packet_type ax25_packet_type __read_mostly = {
 };
 
 static struct notifier_block ax25_dev_notifier = {
-       .notifier_call =ax25_device_event,
+       .notifier_call = ax25_device_event,
 };
 
 static int __init ax25_init(void)
index d5744b7525118d843d770e5fb9c5299ae94727a9..919a5ce47515b17c63976bc7ee001b657760c606 100644 (file)
@@ -29,7 +29,7 @@ static int min_proto[1],              max_proto[] = { AX25_PROTO_MAX };
 static int min_ds_timeout[1],          max_ds_timeout[] = {65535000};
 #endif
 
-static const ctl_table ax25_param_table[] = {
+static const struct ctl_table ax25_param_table[] = {
        {
                .procname       = "ip_default_mode",
                .maxlen         = sizeof(int),
index acbac2a9c62fc6e0c70f893f80da00ea5503cdf9..489bb36f1b9464381d2aa83c78740977e3899b77 100644 (file)
@@ -32,7 +32,6 @@ batman-adv-y += icmp_socket.o
 batman-adv-y += main.o
 batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
 batman-adv-y += originator.o
-batman-adv-y += ring_buffer.o
 batman-adv-y += routing.o
 batman-adv-y += send.o
 batman-adv-y += soft-interface.o
index 071f288b77a86c8c88b6eadd9634a24cdb209e81..62da5278014a77e1e8a60902541b1999b2f85964 100644 (file)
@@ -19,7 +19,6 @@
 
 #include "main.h"
 #include "translation-table.h"
-#include "ring_buffer.h"
 #include "originator.h"
 #include "routing.h"
 #include "gateway_common.h"
 #include "bat_algo.h"
 #include "network-coding.h"
 
+/**
+ * batadv_ring_buffer_set - update the ring buffer with the given value
+ * @lq_recv: pointer to the ring buffer
+ * @lq_index: index to store the value at
+ * @value: value to store in the ring buffer
+ */
+static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
+                                  uint8_t value)
+{
+       lq_recv[*lq_index] = value;
+       *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
+}
+
+/**
+ * batadv_ring_buffer_avg - compute the average of all non-zero values stored
+ * in the given ring buffer
+ * @lq_recv: pointer to the ring buffer
+ *
+ * Returns computed average value.
+ */
+static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
+{
+       const uint8_t *ptr;
+       uint16_t count = 0, i = 0, sum = 0;
+
+       ptr = lq_recv;
+
+       while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
+               if (*ptr != 0) {
+                       count++;
+                       sum += *ptr;
+               }
+
+               i++;
+               ptr++;
+       }
+
+       if (count == 0)
+               return 0;
+
+       return (uint8_t)(sum / count);
+}
+
+/*
+ * batadv_dup_status - duplicate status
+ * @BATADV_NO_DUP: the packet is not a duplicate
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
+ *  neighbor)
+ * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
+enum batadv_dup_status {
+       BATADV_NO_DUP = 0,
+       BATADV_ORIG_DUP,
+       BATADV_NEIGH_DUP,
+       BATADV_PROTECTED,
+};
+
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
                        const uint8_t *neigh_addr,
                        struct batadv_orig_node *orig_node,
-                       struct batadv_orig_node *orig_neigh, __be32 seqno)
+                       struct batadv_orig_node *orig_neigh)
 {
        struct batadv_neigh_node *neigh_node;
 
-       neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr,
-                                          ntohl(seqno));
+       neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr);
        if (!neigh_node)
                goto out;
 
@@ -413,18 +469,16 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
        else
                skb_size = packet_len;
 
-       skb_size += ETH_HLEN + NET_IP_ALIGN;
+       skb_size += ETH_HLEN;
 
-       forw_packet_aggr->skb = dev_alloc_skb(skb_size);
+       forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
        if (!forw_packet_aggr->skb) {
                if (!own_packet)
                        atomic_inc(&bat_priv->batman_queue_left);
                kfree(forw_packet_aggr);
                goto out;
        }
-       skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN);
-
-       INIT_HLIST_NODE(&forw_packet_aggr->list);
+       skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
 
        skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
        forw_packet_aggr->packet_len = packet_len;
@@ -590,6 +644,41 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                                if_incoming, 0, batadv_iv_ogm_fwd_send_time());
 }
 
+/**
+ * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for
+ * the given interface
+ * @hard_iface: the interface for which the windows have to be shifted
+ */
+static void
+batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
+{
+       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+       struct batadv_hashtable *hash = bat_priv->orig_hash;
+       struct hlist_head *head;
+       struct batadv_orig_node *orig_node;
+       unsigned long *word;
+       uint32_t i;
+       size_t word_index;
+       uint8_t *w;
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       word_index = hard_iface->if_num * BATADV_NUM_WORDS;
+                       word = &(orig_node->bcast_own[word_index]);
+
+                       batadv_bit_get_packet(bat_priv, word, 1, 0);
+                       w = &orig_node->bcast_own_sum[hard_iface->if_num];
+                       *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+               }
+               rcu_read_unlock();
+       }
+}
+
 static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -634,7 +723,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
                batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
        }
 
-       batadv_slide_own_bcast_window(hard_iface);
+       batadv_iv_ogm_slide_own_bcast_window(hard_iface);
        batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
                                hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
                                batadv_iv_ogm_emit_send_time(bat_priv));
@@ -650,7 +739,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                          const struct batadv_ogm_packet *batadv_ogm_packet,
                          struct batadv_hard_iface *if_incoming,
                          const unsigned char *tt_buff,
-                         int is_duplicate)
+                         enum batadv_dup_status dup_status)
 {
        struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
        struct batadv_neigh_node *router = NULL;
@@ -670,13 +759,13 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
                    tmp_neigh_node->if_incoming == if_incoming &&
                    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
-                       if (neigh_node)
+                       if (WARN(neigh_node, "too many matching neigh_nodes"))
                                batadv_neigh_node_free_ref(neigh_node);
                        neigh_node = tmp_neigh_node;
                        continue;
                }
 
-               if (is_duplicate)
+               if (dup_status != BATADV_NO_DUP)
                        continue;
 
                spin_lock_bh(&tmp_neigh_node->lq_update_lock);
@@ -696,8 +785,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 
                neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
                                                     ethhdr->h_source,
-                                                    orig_node, orig_tmp,
-                                                    batadv_ogm_packet->seqno);
+                                                    orig_node, orig_tmp);
 
                batadv_orig_node_free_ref(orig_tmp);
                if (!neigh_node)
@@ -718,7 +806,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
        spin_unlock_bh(&neigh_node->lq_update_lock);
 
-       if (!is_duplicate) {
+       if (dup_status == BATADV_NO_DUP) {
                orig_node->last_ttl = batadv_ogm_packet->header.ttl;
                neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
        }
@@ -829,8 +917,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
                neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
                                                     orig_neigh_node->orig,
                                                     orig_neigh_node,
-                                                    orig_neigh_node,
-                                                    batadv_ogm_packet->seqno);
+                                                    orig_neigh_node);
 
        if (!neigh_node)
                goto out;
@@ -902,15 +989,16 @@ out:
        return ret;
 }
 
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- *   1 the packet is a duplicate
- *   0 the packet has not yet been received
- *  -1 the packet is old and has been received while the seqno window
- *     was protected. Caller should drop it.
+/**
+ * batadv_iv_ogm_update_seqnos -  process a batman packet for all interfaces,
+ *  adjust the sequence number and find out whether it is a duplicate
+ * @ethhdr: ethernet header of the packet
+ * @batadv_ogm_packet: OGM packet to be considered
+ * @if_incoming: interface on which the OGM packet was received
+ *
+ * Returns duplicate status as enum batadv_dup_status
  */
-static int
+static enum batadv_dup_status
 batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                            const struct batadv_ogm_packet *batadv_ogm_packet,
                            const struct batadv_hard_iface *if_incoming)
@@ -918,17 +1006,18 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *tmp_neigh_node;
-       int is_duplicate = 0;
+       int is_dup;
        int32_t seq_diff;
        int need_update = 0;
-       int set_mark, ret = -1;
+       int set_mark;
+       enum batadv_dup_status ret = BATADV_NO_DUP;
        uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
        uint8_t *neigh_addr;
        uint8_t packet_count;
 
        orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
-               return 0;
+               return BATADV_NO_DUP;
 
        spin_lock_bh(&orig_node->ogm_cnt_lock);
        seq_diff = seqno - orig_node->last_real_seqno;
@@ -936,22 +1025,29 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        /* signalize caller that the packet is to be dropped. */
        if (!hlist_empty(&orig_node->neigh_list) &&
            batadv_window_protected(bat_priv, seq_diff,
-                                   &orig_node->batman_seqno_reset))
+                                   &orig_node->batman_seqno_reset)) {
+               ret = BATADV_PROTECTED;
                goto out;
+       }
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_node->neigh_list, list) {
-               is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
-                                               orig_node->last_real_seqno,
-                                               seqno);
-
                neigh_addr = tmp_neigh_node->addr;
+               is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+                                        orig_node->last_real_seqno,
+                                        seqno);
+
                if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
-                   tmp_neigh_node->if_incoming == if_incoming)
+                   tmp_neigh_node->if_incoming == if_incoming) {
                        set_mark = 1;
-               else
+                       if (is_dup)
+                               ret = BATADV_NEIGH_DUP;
+               } else {
                        set_mark = 0;
+                       if (is_dup && (ret != BATADV_NEIGH_DUP))
+                               ret = BATADV_ORIG_DUP;
+               }
 
                /* if the window moved, set the update flag. */
                need_update |= batadv_bit_get_packet(bat_priv,
@@ -971,8 +1067,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                orig_node->last_real_seqno = seqno;
        }
 
-       ret = is_duplicate;
-
 out:
        spin_unlock_bh(&orig_node->ogm_cnt_lock);
        batadv_orig_node_free_ref(orig_node);
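Read together, the classification loop above gives BATADV_NEIGH_DUP precedence over BATADV_ORIG_DUP: a duplicate seen from the sending neighbour on the receiving interface always wins. A hypothetical helper (classify_dup is not part of the patch, purely illustrative) expressing the same decision:

static enum batadv_dup_status classify_dup(bool is_dup, bool same_neigh_and_iface,
					   enum batadv_dup_status ret)
{
	if (!is_dup)
		return ret;
	if (same_neigh_and_iface)
		return BATADV_NEIGH_DUP;
	if (ret != BATADV_NEIGH_DUP)
		return BATADV_ORIG_DUP;	/* duplicate, but only via another neighbour */
	return ret;
}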
@@ -991,10 +1085,11 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        struct batadv_neigh_node *orig_neigh_router = NULL;
        int has_directlink_flag;
        int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
-       int is_broadcast = 0, is_bidirect;
+       int is_bidirect;
        bool is_single_hop_neigh = false;
        bool is_from_best_next_hop = false;
-       int is_duplicate, sameseq, simlar_ttl;
+       int sameseq, similar_ttl;
+       enum batadv_dup_status dup_status;
        uint32_t if_incoming_seqno;
        uint8_t *prev_sender;
 
@@ -1054,19 +1149,9 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                if (batadv_compare_eth(batadv_ogm_packet->prev_sender,
                                       hard_iface->net_dev->dev_addr))
                        is_my_oldorig = 1;
-
-               if (is_broadcast_ether_addr(ethhdr->h_source))
-                       is_broadcast = 1;
        }
        rcu_read_unlock();
 
-       if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
-               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Drop packet: incompatible batman version (%i)\n",
-                          batadv_ogm_packet->header.version);
-               return;
-       }
-
        if (is_my_addr) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: received my own broadcast (sender: %pM)\n",
@@ -1074,13 +1159,6 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                return;
        }
 
-       if (is_broadcast) {
-               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
-                          ethhdr->h_source);
-               return;
-       }
-
        if (is_my_orig) {
                unsigned long *word;
                int offset;
@@ -1138,10 +1216,10 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        if (!orig_node)
                return;
 
-       is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
-                                                  if_incoming);
+       dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
+                                                if_incoming);
 
-       if (is_duplicate == -1) {
+       if (dup_status == BATADV_PROTECTED) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: packet within seqno protection time (sender: %pM)\n",
                           ethhdr->h_source);
@@ -1211,11 +1289,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
         * seqno and similar ttl as the non-duplicate
         */
        sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
-       simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
-       if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
+       similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+       if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
+                           (sameseq && similar_ttl)))
                batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
                                          batadv_ogm_packet, if_incoming,
-                                         tt_buff, is_duplicate);
+                                         tt_buff, dup_status);
 
        /* is single hop (direct) neighbor */
        if (is_single_hop_neigh) {
@@ -1236,7 +1315,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                goto out_neigh;
        }
 
-       if (is_duplicate) {
+       if (dup_status == BATADV_NEIGH_DUP) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: duplicate packet received\n");
                goto out_neigh;
@@ -1288,7 +1367,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
                           skb->len + ETH_HLEN);
 
        packet_len = skb_headlen(skb);
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
        packet_buff = skb->data;
        batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
 
index 379061c725491ec0801940265aedd21245e4ccf1..e14531f1ce1c5258ccc816f9a739ed464b6c364f 100644 (file)
@@ -180,7 +180,7 @@ static struct batadv_bla_claim
  */
 static struct batadv_bla_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
-                         uint8_t *addr, short vid)
+                         uint8_t *addr, unsigned short vid)
 {
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
@@ -257,7 +257,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
  */
 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
-                                 short vid, int claimtype)
+                                 unsigned short vid, int claimtype)
 {
        struct sk_buff *skb;
        struct ethhdr *ethhdr;
@@ -307,7 +307,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                 */
                memcpy(ethhdr->h_source, mac, ETH_ALEN);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
-                          "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+                          "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
+                          BATADV_PRINT_VID(vid));
                break;
        case BATADV_CLAIM_TYPE_UNCLAIM:
                /* unclaim frame
@@ -316,7 +317,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                memcpy(hw_src, mac, ETH_ALEN);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
-                          vid);
+                          BATADV_PRINT_VID(vid));
                break;
        case BATADV_CLAIM_TYPE_ANNOUNCE:
                /* announcement frame
@@ -325,7 +326,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                memcpy(hw_src, mac, ETH_ALEN);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
-                          ethhdr->h_source, vid);
+                          ethhdr->h_source, BATADV_PRINT_VID(vid));
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                /* request frame
@@ -335,13 +336,15 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                memcpy(hw_src, mac, ETH_ALEN);
                memcpy(ethhdr->h_dest, mac, ETH_ALEN);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
-                          "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
-                          ethhdr->h_source, ethhdr->h_dest, vid);
+                          "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
+                          ethhdr->h_source, ethhdr->h_dest,
+                          BATADV_PRINT_VID(vid));
                break;
        }
 
-       if (vid != -1)
-               skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
+       if (vid & BATADV_VLAN_HAS_TAG)
+               skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
+                                     vid & VLAN_VID_MASK);
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
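The vid handling above reflects the new encoding used by this series: vid is now an unsigned short whose low 12 bits hold the 802.1Q VLAN ID and whose bit 15 (BATADV_VLAN_HAS_TAG, defined in main.h further down) marks tagged traffic, replacing the old signed -1 convention. A minimal sketch, with tci standing in for the TCI value read from a VLAN header:

unsigned short vid = BATADV_NO_FLAGS;	/* untagged by default */

vid = tci & VLAN_VID_MASK;		/* keep the 12-bit VLAN ID */
vid |= BATADV_VLAN_HAS_TAG;		/* remember that a tag was present */

if (vid & BATADV_VLAN_HAS_TAG)		/* re-add the tag when sending */
	skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid & VLAN_VID_MASK);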
@@ -367,7 +370,7 @@ out:
  */
 static struct batadv_bla_backbone_gw *
 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
-                          short vid, bool own_backbone)
+                          unsigned short vid, bool own_backbone)
 {
        struct batadv_bla_backbone_gw *entry;
        struct batadv_orig_node *orig_node;
@@ -380,7 +383,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
-                  orig, vid);
+                  orig, BATADV_PRINT_VID(vid));
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
@@ -434,7 +437,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
 static void
 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
                                  struct batadv_hard_iface *primary_if,
-                                 short vid)
+                                 unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -456,7 +459,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
  */
 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                                      struct batadv_hard_iface *primary_if,
-                                     short vid)
+                                     unsigned short vid)
 {
        struct hlist_head *head;
        struct batadv_hashtable *hash;
@@ -547,7 +550,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
  * @backbone_gw: the backbone gateway which claims it
  */
 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
-                                const uint8_t *mac, const short vid,
+                                const uint8_t *mac, const unsigned short vid,
                                 struct batadv_bla_backbone_gw *backbone_gw)
 {
        struct batadv_bla_claim *claim;
@@ -572,7 +575,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                atomic_set(&claim->refcount, 2);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
-                          mac, vid);
+                          mac, BATADV_PRINT_VID(vid));
                hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
                                             batadv_compare_claim,
                                             batadv_choose_claim, claim,
@@ -591,7 +594,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
-                          mac, vid);
+                          mac, BATADV_PRINT_VID(vid));
 
                claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
                batadv_backbone_gw_free_ref(claim->backbone_gw);
@@ -611,7 +614,7 @@ claim_free_ref:
  * given mac address and vid.
  */
 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
-                                const uint8_t *mac, const short vid)
+                                const uint8_t *mac, const unsigned short vid)
 {
        struct batadv_bla_claim search_claim, *claim;
 
@@ -622,7 +625,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                return;
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
-                  mac, vid);
+                  mac, BATADV_PRINT_VID(vid));
 
        batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
                           batadv_choose_claim, claim);
@@ -637,7 +640,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 /* check for ANNOUNCE frame, return 1 if handled */
 static int batadv_handle_announce(struct batadv_priv *bat_priv,
                                  uint8_t *an_addr, uint8_t *backbone_addr,
-                                 short vid)
+                                 unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
        uint16_t crc;
@@ -658,12 +661,13 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
-                  vid, backbone_gw->orig, crc);
+                  BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
 
        if (backbone_gw->crc != crc) {
                batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
                           "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
-                          backbone_gw->orig, backbone_gw->vid,
+                          backbone_gw->orig,
+                          BATADV_PRINT_VID(backbone_gw->vid),
                           backbone_gw->crc, crc);
 
                batadv_bla_send_request(backbone_gw);
@@ -685,7 +689,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
 static int batadv_handle_request(struct batadv_priv *bat_priv,
                                 struct batadv_hard_iface *primary_if,
                                 uint8_t *backbone_addr,
-                                struct ethhdr *ethhdr, short vid)
+                                struct ethhdr *ethhdr, unsigned short vid)
 {
        /* check for REQUEST frame */
        if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
@@ -699,7 +703,7 @@ static int batadv_handle_request(struct batadv_priv *bat_priv,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
-                  vid, ethhdr->h_source);
+                  BATADV_PRINT_VID(vid), ethhdr->h_source);
 
        batadv_bla_answer_request(bat_priv, primary_if, vid);
        return 1;
@@ -709,7 +713,7 @@ static int batadv_handle_request(struct batadv_priv *bat_priv,
 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
                                 struct batadv_hard_iface *primary_if,
                                 uint8_t *backbone_addr,
-                                uint8_t *claim_addr, short vid)
+                                uint8_t *claim_addr, unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -727,7 +731,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        /* this must be an UNCLAIM frame */
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
-                  claim_addr, vid, backbone_gw->orig);
+                  claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
 
        batadv_bla_del_claim(bat_priv, claim_addr, vid);
        batadv_backbone_gw_free_ref(backbone_gw);
@@ -738,7 +742,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
 static int batadv_handle_claim(struct batadv_priv *bat_priv,
                               struct batadv_hard_iface *primary_if,
                               uint8_t *backbone_addr, uint8_t *claim_addr,
-                              short vid)
+                              unsigned short vid)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -861,14 +865,15 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        struct batadv_bla_claim_dst *bla_dst;
        uint16_t proto;
        int headlen;
-       short vid = -1;
+       unsigned short vid = BATADV_NO_FLAGS;
        int ret;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
                vhdr = (struct vlan_ethhdr *)ethhdr;
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+               vid |= BATADV_VLAN_HAS_TAG;
                proto = ntohs(vhdr->h_vlan_encapsulated_proto);
                headlen = sizeof(*vhdr);
        } else {
@@ -885,7 +890,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                return 0;
 
        /* pskb_may_pull() may have modified the pointers, get ethhdr again */
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
        arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
 
        /* Check whether the ARP frame carries a valid
@@ -910,7 +915,8 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        if (ret == 1)
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
-                          ethhdr->h_source, vid, hw_src, hw_dst);
+                          ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
+                          hw_dst);
 
        if (ret < 2)
                return ret;
@@ -945,7 +951,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
-                  ethhdr->h_source, vid, hw_src, hw_dst);
+                  ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
        return 1;
 }
 
@@ -1067,6 +1073,10 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
        group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
        bat_priv->bla.claim_dest.group = group;
 
+       /* purge everything when bridge loop avoidance is turned off */
+       if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+               oldif = NULL;
+
        if (!oldif) {
                batadv_bla_purge_claims(bat_priv, NULL, 1);
                batadv_bla_purge_backbone_gw(bat_priv, 1);
@@ -1358,7 +1368,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
        struct batadv_bla_backbone_gw *backbone_gw;
-       short vid = -1;
+       unsigned short vid = BATADV_NO_FLAGS;
 
        if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
                return 0;
@@ -1375,6 +1385,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 
                vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+               vid |= BATADV_VLAN_HAS_TAG;
        }
 
        /* see if this originator is a backbone gw for this VLAN */
@@ -1424,15 +1435,15 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
-                 bool is_bcast)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                 unsigned short vid, bool is_bcast)
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
        int ret;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
@@ -1519,7 +1530,8 @@ out:
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
-int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                 unsigned short vid)
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
@@ -1539,7 +1551,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
        if (batadv_bla_process_claim(bat_priv, primary_if, skb))
                goto handled;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
@@ -1623,8 +1635,8 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
                        is_own = batadv_compare_eth(claim->backbone_gw->orig,
                                                    primary_addr);
-                       seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
-                                  claim->addr, claim->vid,
+                       seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
+                                  claim->addr, BATADV_PRINT_VID(claim->vid),
                                   claim->backbone_gw->orig,
                                   (is_own ? 'x' : ' '),
                                   claim->backbone_gw->crc);
@@ -1676,10 +1688,10 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
                        if (is_own)
                                continue;
 
-                       seq_printf(seq,
-                                  " * %pM on % 5d % 4i.%03is (%#.4x)\n",
-                                  backbone_gw->orig, backbone_gw->vid,
-                                  secs, msecs, backbone_gw->crc);
+                       seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
+                                  backbone_gw->orig,
+                                  BATADV_PRINT_VID(backbone_gw->vid), secs,
+                                  msecs, backbone_gw->crc);
                }
                rcu_read_unlock();
        }
index dea2fbc5d98d00d020608db19e51e830501c93a7..4b102e71e5bd63c2bee71f14bcdb0b799398ef5e 100644 (file)
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
-                 bool is_bcast);
-int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                 unsigned short vid, bool is_bcast);
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                 unsigned short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
@@ -42,13 +43,14 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, short vid, bool is_bcast)
+                               struct sk_buff *skb, unsigned short vid,
+                               bool is_bcast)
 {
        return 0;
 }
 
 static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, short vid)
+                               struct sk_buff *skb, unsigned short vid)
 {
        return 0;
 }
index 8e15d966d9b0a9f30e1010c242c440e206f73ea2..06345d401588c949762edfa14aaeb3f018a8e294 100644 (file)
@@ -45,9 +45,9 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_entry_free_ref - decrements the dat_entry refcounter and possibly
+ * batadv_dat_entry_free_ref - decrement the dat_entry refcounter and possibly
  * free it
- * @dat_entry: the oentry to free
+ * @dat_entry: the entry to free
  */
 static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
 {
@@ -56,10 +56,10 @@ static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * batadv_dat_to_purge - checks whether a dat_entry has to be purged or not
+ * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Returns true if the entry has to be purged now, false otherwise.
  */
 static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 {
@@ -75,8 +75,8 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 *           returns a boolean value: true if the entry has to be deleted,
  *           false otherwise
  *
- * Loops over each entry in the DAT local storage and delete it if and only if
- * the to_purge function passed as argument returns true
+ * Loops over each entry in the DAT local storage and deletes it if and only if
+ * the to_purge function passed as argument returns true.
  */
 static void __batadv_dat_purge(struct batadv_priv *bat_priv,
                               bool (*to_purge)(struct batadv_dat_entry *))
@@ -97,7 +97,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(dat_entry, node_tmp, head,
                                          hash_entry) {
-                       /* if an helper function has been passed as parameter,
+                       /* if a helper function has been passed as parameter,
                         * ask it if the entry has to be purged or not
                         */
                        if (to_purge && !to_purge(dat_entry))
@@ -134,7 +134,7 @@ static void batadv_dat_purge(struct work_struct *work)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Returns 1 if the two entry are the same, 0 otherwise
+ * Returns 1 if the two entries are the same, 0 otherwise.
  */
 static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
 {
@@ -149,7 +149,7 @@ static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the hw_src field in the ARP packet
+ * Returns the value of the hw_src field in the ARP packet.
  */
 static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 {
@@ -166,7 +166,7 @@ static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the ip_src field in the ARP packet
+ * Returns the value of the ip_src field in the ARP packet.
  */
 static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 {
@@ -178,7 +178,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the hw_dst field in the ARP packet
+ * Returns the value of the hw_dst field in the ARP packet.
  */
 static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 {
@@ -190,7 +190,7 @@ static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the ip_dst field in the ARP packet
+ * Returns the value of the ip_dst field in the ARP packet.
  */
 static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 {
@@ -202,7 +202,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
  * @data: data to hash
  * @size: size of the hash table
  *
- * Returns the selected index in the hash table for the given data
+ * Returns the selected index in the hash table for the given data.
  */
 static uint32_t batadv_hash_dat(const void *data, uint32_t size)
 {
@@ -224,12 +224,12 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
 }
 
 /**
- * batadv_dat_entry_hash_find - looks for a given dat_entry in the local hash
+ * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
  *
- * Returns the dat_entry if found, NULL otherwise
+ * Returns the dat_entry if found, NULL otherwise.
  */
 static struct batadv_dat_entry *
 batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
@@ -343,9 +343,6 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (hdr_size == 0)
                return;
 
-       /* if the ARP packet is encapsulated in a batman packet, let's print
-        * some debug messages
-        */
        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
        switch (unicast_4addr_packet->u.header.packet_type) {
@@ -409,7 +406,8 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * @candidate: orig_node under evaluation
  * @max_orig_node: last selected candidate
  *
- * Returns true if the node has been elected as next candidate or false othrwise
+ * Returns true if the node has been elected as next candidate or false
+ * otherwise.
  */
 static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
                                         int select, batadv_dat_addr_t tmp_max,
@@ -472,7 +470,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
         */
        cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;
 
-       /* iterate over the originator list and find the node with closest
+       /* iterate over the originator list and find the node with the closest
         * dat_address which has not been selected yet
         */
        for (i = 0; i < hash->size; i++) {
@@ -480,7 +478,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       /* the dht space is a ring and addresses are unsigned */
+                       /* the dht space is a ring using unsigned addresses */
                        tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
                                  ip_key;
 
@@ -512,7 +510,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_select_candidates - selects the nodes which the DHT message has to
+ * batadv_dat_select_candidates - select the nodes which the DHT message has to
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
@@ -521,7 +519,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * closest values (from the LEFT, with wrap around if needed) then the hash
  * value of the key. ip_dst is the key.
  *
- * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM
+ * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
 batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
@@ -558,10 +556,11 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * @ip: the DHT key
  * @packet_subtype: unicast4addr packet subtype to use
  *
- * In this function the skb is copied by means of pskb_copy() and is sent as
- * unicast packet to each of the selected candidates
+ * This function copies the skb with pskb_copy() and is sent as unicast packet
+ * to each of the selected candidates.
  *
- * Returns true if the packet is sent to at least one candidate, false otherwise
+ * Returns true if the packet is sent to at least one candidate, false
+ * otherwise.
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                                 struct sk_buff *skb, __be32 ip,
@@ -727,7 +726,7 @@ out:
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
  *
- * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise
+ * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
  */
 static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
                                    struct sk_buff *skb, int hdr_size)
@@ -754,9 +753,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
 
        arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
 
-       /* Check whether the ARP packet carries a valid
-        * IP information
-        */
+       /* check whether the ARP packet carries a valid IP information */
        if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
                goto out;
 
@@ -784,7 +781,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
        if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
                goto out;
 
-       /* we don't care about the destination MAC address in ARP requests */
+       /* don't care about the destination MAC address in ARP requests */
        if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
                hw_dst = batadv_arp_hw_dst(skb, hdr_size);
                if (is_zero_ether_addr(hw_dst) ||
@@ -804,8 +801,8 @@ out:
  * @skb: packet to check
  *
  * Returns true if the message has been sent to the dht candidates, false
- * otherwise. In case of true the message has to be enqueued to permit the
- * fallback
+ * otherwise. In case of a positive return value the message has to be enqueued
+ * to permit the fallback.
  */
 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb)
@@ -837,6 +834,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
        dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
        if (dat_entry) {
+               /* If the ARP request is destined for a local client, the local
+                * client will answer itself. DAT would only generate a
+                * duplicate packet.
+                *
+                * Moreover, if the soft-interface is enslaved into a bridge, an
+                * additional DAT answer may trigger kernel warnings about
+                * a packet coming from the wrong port.
+                */
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+                       ret = true;
+                       goto out;
+               }
+
                skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
                                     bat_priv->soft_iface, ip_dst, hw_src,
                                     dat_entry->mac_addr, hw_src);
@@ -854,7 +864,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
                ret = true;
        } else {
-               /* Send the request on the DHT */
+               /* Send the request to the DHT */
                ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
                                           BATADV_P_DAT_DHT_GET);
        }
@@ -871,7 +881,7 @@ out:
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
  *
- * Returns true if the request has been answered, false otherwise
+ * Returns true if the request has been answered, false otherwise.
  */
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb, int hdr_size)
@@ -911,10 +921,9 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        if (!skb_new)
                goto out;
 
-       /* to preserve backwards compatibility, here the node has to answer
-        * using the same packet type it received for the request. This is due
-        * to that if a node is not using the 4addr packet format it may not
-        * support it.
+       /* To preserve backwards compatibility, the node has to choose the outgoing
+        * format based on the incoming request packet type. The assumption is
+        * that a node not using the 4addr packet format doesn't support it.
         */
        if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
                err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
@@ -964,7 +973,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
 
        /* Send the ARP reply to the candidates for both the IP addresses that
-        * the node got within the ARP reply
+        * the node obtained from the ARP reply
         */
        batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
        batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
@@ -974,7 +983,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * DAT storage only
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
- * @hdr_size: siaze of the encapsulation header
+ * @hdr_size: size of the encapsulation header
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, int hdr_size)
@@ -1018,11 +1027,11 @@ out:
 
 /**
  * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
- * (because the node has already got the reply via DAT) or not
+ * (because the node has already obtained the reply via DAT) or not
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
- * Returns true if the node can drop the packet, false otherwise
+ * Returns true if the node can drop the packet, false otherwise.
  */
 bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
                                      struct batadv_forw_packet *forw_packet)
index 522243aff2f3b4a45895efea0752405bbee95fa9..c478e6bcf89b8bd570a54a9a811b4309c7a2e5f0 100644 (file)
@@ -117,6 +117,58 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
        return 1;
 }
 
+/**
+ * batadv_is_wifi_netdev - check if the given net_device struct is a wifi
+ *  interface
+ * @net_device: the device to check
+ *
+ * Returns true if the net device is an 802.11 wireless device, false otherwise.
+ */
+static bool batadv_is_wifi_netdev(struct net_device *net_device)
+{
+#ifdef CONFIG_WIRELESS_EXT
+       /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
+        * check for wireless_handlers != NULL
+        */
+       if (net_device->wireless_handlers)
+               return true;
+#endif
+
+       /* cfg80211 drivers have to set ieee80211_ptr */
+       if (net_device->ieee80211_ptr)
+               return true;
+
+       return false;
+}
+
+/**
+ * batadv_is_wifi_iface - check if the given interface represented by ifindex
+ *  is a wifi interface
+ * @ifindex: interface index to check
+ *
+ * Returns true if the interface represented by ifindex is an 802.11 wireless
+ * device, false otherwise.
+ */
+bool batadv_is_wifi_iface(int ifindex)
+{
+       struct net_device *net_device = NULL;
+       bool ret = false;
+
+       if (ifindex == BATADV_NULL_IFINDEX)
+               goto out;
+
+       net_device = dev_get_by_index(&init_net, ifindex);
+       if (!net_device)
+               goto out;
+
+       ret = batadv_is_wifi_netdev(net_device);
+
+out:
+       if (net_device)
+               dev_put(net_device);
+       return ret;
+}
+
 static struct batadv_hard_iface *
 batadv_hardif_get_active(const struct net_device *soft_iface)
 {
@@ -525,7 +577,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 
        dev_hold(net_dev);
 
-       hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
+       hard_iface = kzalloc(sizeof(*hard_iface), GFP_ATOMIC);
        if (!hard_iface)
                goto release_dev;
 
@@ -541,18 +593,16 @@ batadv_hardif_add_interface(struct net_device *net_dev)
        INIT_WORK(&hard_iface->cleanup_work,
                  batadv_hardif_remove_interface_finish);
 
+       hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT;
+       if (batadv_is_wifi_netdev(net_dev))
+               hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
+
        /* extra reference for return */
        atomic_set(&hard_iface->refcount, 2);
 
        batadv_check_known_mac_addr(hard_iface->net_dev);
        list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
 
-       /* This can't be called via a bat_priv callback because
-        * we have no bat_priv yet.
-        */
-       atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
-       hard_iface->bat_iv.ogm_buff = NULL;
-
        return hard_iface;
 
 free_if:
@@ -595,7 +645,7 @@ void batadv_hardif_remove_interfaces(void)
 static int batadv_hard_if_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
-       struct net_device *net_dev = ptr;
+       struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
        struct batadv_hard_iface *hard_iface;
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_priv *bat_priv;
@@ -657,38 +707,6 @@ out:
        return NOTIFY_DONE;
 }
 
-/* This function returns true if the interface represented by ifindex is a
- * 802.11 wireless device
- */
-bool batadv_is_wifi_iface(int ifindex)
-{
-       struct net_device *net_device = NULL;
-       bool ret = false;
-
-       if (ifindex == BATADV_NULL_IFINDEX)
-               goto out;
-
-       net_device = dev_get_by_index(&init_net, ifindex);
-       if (!net_device)
-               goto out;
-
-#ifdef CONFIG_WIRELESS_EXT
-       /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
-        * check for wireless_handlers != NULL
-        */
-       if (net_device->wireless_handlers)
-               ret = true;
-       else
-#endif
-               /* cfg80211 drivers have to set ieee80211_ptr */
-               if (net_device->ieee80211_ptr)
-                       ret = true;
-out:
-       if (net_device)
-               dev_put(net_device);
-       return ret;
-}
-
 struct notifier_block batadv_hard_if_notifier = {
        .notifier_call = batadv_hard_if_event,
 };
index 0ba6c899b2d3512b9dd0bc0c4292fcf08d1f0154..b27508b8085cd84727bf89c434b0b56adcdcd87f 100644 (file)
@@ -177,13 +177,13 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
        if (len >= sizeof(struct batadv_icmp_packet_rr))
                packet_len = sizeof(struct batadv_icmp_packet_rr);
 
-       skb = dev_alloc_skb(packet_len + ETH_HLEN + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
        if (!skb) {
                len = -ENOMEM;
                goto out;
        }
 
-       skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+       skb_reserve(skb, ETH_HLEN);
        icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
 
        if (copy_from_user(icmp_packet, buff, packet_len)) {
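The allocation change above is effectively behaviour-preserving: netdev_alloc_skb_ip_align() already adds and reserves the NET_IP_ALIGN headroom internally, so only the ETH_HLEN part still needs an explicit skb_reserve(). Side by side:

/* old */
skb = dev_alloc_skb(packet_len + ETH_HLEN + NET_IP_ALIGN);
skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);

/* new - NET_IP_ALIGN is handled inside the helper */
skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
skb_reserve(skb, ETH_HLEN);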
index 3e30a0f1b908b2872bcb6a13100306e0deda0c85..08125f3f6064ddf42e63c872a43b8f2e68a9f941 100644 (file)
@@ -163,16 +163,25 @@ void batadv_mesh_free(struct net_device *soft_iface)
        batadv_vis_quit(bat_priv);
 
        batadv_gw_node_purge(bat_priv);
-       batadv_originator_free(bat_priv);
        batadv_nc_free(bat_priv);
+       batadv_dat_free(bat_priv);
+       batadv_bla_free(bat_priv);
 
+       /* Free the TT and the originator tables only after having terminated
+        * all the other dependent components which may use these structures for
+        * their purposes.
+        */
        batadv_tt_free(bat_priv);
 
-       batadv_bla_free(bat_priv);
-
-       batadv_dat_free(bat_priv);
+       /* Since the originator table clean up routine is accessing the TT
+        * tables as well, it has to be invoked after the TT tables have been
+        * freed and marked as empty. This ensures that no cleanup RCU callbacks
+        * accessing the TT data are scheduled for later execution.
+        */
+       batadv_originator_free(bat_priv);
 
        free_percpu(bat_priv->bat_counters);
+       bat_priv->bat_counters = NULL;
 
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 }
@@ -464,7 +473,6 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
                crc = crc32c(crc, data, len);
                consumed += len;
        }
-       skb_abort_seq_read(&st);
 
        return htonl(crc);
 }
@@ -475,7 +483,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
        char *algo_name = (char *)val;
        size_t name_len = strlen(algo_name);
 
-       if (algo_name[name_len - 1] == '\n')
+       if (name_len > 0 && algo_name[name_len - 1] == '\n')
                algo_name[name_len - 1] = '\0';
 
        bat_algo_ops = batadv_algo_get(algo_name);
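The added name_len > 0 check guards the newline stripping against an empty parameter value; without it the code would read algo_name[-1]. A minimal illustration of the failing input (hypothetical, for clarity only):

const char *val = "";			/* empty routing-algorithm parameter value */
char *algo_name = (char *)val;
size_t name_len = strlen(algo_name);	/* 0 */
/* old code: algo_name[name_len - 1] dereferences algo_name[-1], one byte
 * before the buffer; the new length check avoids that.
 */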
index 59a0d6af15c88aca2f05c0883fc2fdfeadea66f4..5e9aebb7d56b5c117ebc80a893145a1cb9b975ab 100644 (file)
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.2.0"
+#define BATADV_SOURCE_VERSION "2013.3.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
 
 #define BATADV_LOG_BUF_LEN 8192          /* has to be a power of 2 */
 
+/* number of packets to send for broadcasts on different interface types */
+#define BATADV_NUM_BCASTS_DEFAULT 1
+#define BATADV_NUM_BCASTS_WIRELESS 3
+#define BATADV_NUM_BCASTS_MAX 3
+
 /* msecs after which an ARP_REQUEST is sent in broadcast as fallback */
 #define ARP_REQ_DELAY 250
 /* numbers of originator to contact for any PUT/GET DHT operation */
@@ -157,6 +162,17 @@ enum batadv_uev_type {
 #include <linux/seq_file.h>
 #include "types.h"
 
+/**
+ * batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+       BATADV_VLAN_HAS_TAG     = BIT(15),
+};
+
+#define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
+                              (int)(vid & VLAN_VID_MASK) : -1)
+
 extern char batadv_routing_algo[];
 extern struct list_head batadv_hardif_list;
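BATADV_PRINT_VID keeps the log output compatible with the old signed convention: tagged frames print their 12-bit VLAN ID, untagged traffic still shows up as -1 (assuming BATADV_NO_FLAGS, the untagged default used in the bridge loop avoidance code above, leaves bit 15 clear). For example:

unsigned short tagged = 42 | BATADV_VLAN_HAS_TAG;
unsigned short untagged = BATADV_NO_FLAGS;

pr_info("vid %d\n", BATADV_PRINT_VID(tagged));		/* "vid 42" */
pr_info("vid %d\n", BATADV_PRINT_VID(untagged));	/* "vid -1" */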
 
index f7c54305a9188f83dbd02af6eae287247854babf..a487d46e0aeccdb72ab4ad6e361dd0b4b87a7c05 100644 (file)
@@ -1245,7 +1245,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
                return;
 
        /* Set the mac header as if we actually sent the packet uncoded */
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
        memcpy(ethhdr->h_source, ethhdr->h_dest, ETH_ALEN);
        memcpy(ethhdr->h_dest, eth_dst_new, ETH_ALEN);
 
@@ -1359,18 +1359,17 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
  *  buffer
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
- * @ethhdr: pointer to the ethernet header inside the skb
  *
  * Returns true if the skb was consumed (encoded packet sent) or false otherwise
  */
 bool batadv_nc_skb_forward(struct sk_buff *skb,
-                          struct batadv_neigh_node *neigh_node,
-                          struct ethhdr *ethhdr)
+                          struct batadv_neigh_node *neigh_node)
 {
        const struct net_device *netdev = neigh_node->if_incoming->soft_iface;
        struct batadv_priv *bat_priv = netdev_priv(netdev);
        struct batadv_unicast_packet *packet;
        struct batadv_nc_path *nc_path;
+       struct ethhdr *ethhdr = eth_hdr(skb);
        __be32 packet_id;
        u8 *payload;
 
@@ -1423,7 +1422,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
 {
        struct batadv_unicast_packet *packet;
        struct batadv_nc_path *nc_path;
-       struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       struct ethhdr *ethhdr = eth_hdr(skb);
        __be32 packet_id;
        u8 *payload;
 
@@ -1482,7 +1481,7 @@ out:
 void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb)
 {
-       struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       struct ethhdr *ethhdr = eth_hdr(skb);
 
        if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                return;
@@ -1514,6 +1513,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
        struct ethhdr *ethhdr, ethhdr_tmp;
        uint8_t *orig_dest, ttl, ttvn;
        unsigned int coding_len;
+       int err;
 
        /* Save headers temporarily */
        memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
@@ -1532,7 +1532,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
        skb_reset_network_header(skb);
 
        /* Reconstruct original mac header */
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
        memcpy(ethhdr, &ethhdr_tmp, sizeof(*ethhdr));
 
        /* Select the correct unicast header information based on the location
@@ -1568,8 +1568,11 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                         coding_len);
 
        /* Resize decoded skb if decoded with larger packet */
-       if (nc_packet->skb->len > coding_len + h_size)
-               pskb_trim_rcsum(skb, coding_len + h_size);
+       if (nc_packet->skb->len > coding_len + h_size) {
+               err = pskb_trim_rcsum(skb, coding_len + h_size);
+               if (err)
+                       return NULL;
+       }
 
        /* Create decoded unicast packet */
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -1673,7 +1676,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                return NET_RX_DROP;
 
        coded_packet = (struct batadv_coded_packet *)skb->data;
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        /* Verify frame is destined for us */
        if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) &&
@@ -1759,6 +1762,13 @@ int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
                /* For each orig_node in this bin */
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+                       /* no need to print the orig node if it does not have
+                        * network coding neighbors
+                        */
+                       if (list_empty(&orig_node->in_coding_list) &&
+                           list_empty(&orig_node->out_coding_list))
+                               continue;
+
                        seq_printf(seq, "Node:      %pM\n", orig_node->orig);
 
                        seq_puts(seq, " Ingoing:  ");
index 4fa6d0caddbd394b69c46081c56f155c9665577f..85a4ec81ad50bda26449cfdadbcaf28e62391b8b 100644 (file)
@@ -36,8 +36,7 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv);
 void batadv_nc_init_orig(struct batadv_orig_node *orig_node);
 bool batadv_nc_skb_forward(struct sk_buff *skb,
-                          struct batadv_neigh_node *neigh_node,
-                          struct ethhdr *ethhdr);
+                          struct batadv_neigh_node *neigh_node);
 void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
                                      struct sk_buff *skb);
 void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
@@ -87,8 +86,7 @@ static inline void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 static inline bool batadv_nc_skb_forward(struct sk_buff *skb,
-                                        struct batadv_neigh_node *neigh_node,
-                                        struct ethhdr *ethhdr)
+                                        struct batadv_neigh_node *neigh_node)
 {
        return false;
 }
index 2f3452546636ce71747ef9a8b1a949caae8d8f04..f50553a7de629a411d94d307ba2ea54327e7b28f 100644 (file)
@@ -92,7 +92,7 @@ batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
 
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-                     const uint8_t *neigh_addr, uint32_t seqno)
+                     const uint8_t *neigh_addr)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_neigh_node *neigh_node;
@@ -110,8 +110,8 @@ batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
        atomic_set(&neigh_node->refcount, 2);
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Creating new neighbor %pM, initial seqno %d\n",
-                  neigh_addr, seqno);
+                  "Creating new neighbor %pM on interface %s\n", neigh_addr,
+                  hard_iface->net_dev->name);
 
 out:
        return neigh_node;
@@ -156,12 +156,28 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
        kfree(orig_node);
 }
 
+/**
+ * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
+ * schedule an rcu callback for freeing it
+ * @orig_node: the orig node to free
+ */
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 {
        if (atomic_dec_and_test(&orig_node->refcount))
                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
 }
 
+/**
+ * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
+ * possibly free it (without rcu callback)
+ * @orig_node: the orig node to free
+ */
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
+{
+       if (atomic_dec_and_test(&orig_node->refcount))
+               batadv_orig_node_free_rcu(&orig_node->rcu);
+}
+
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
index 7df48fa7669dd0ac173a51bafe3d6538fb049e4b..7887b84a9af43adbff91cb8e3695b7f29c36a399 100644 (file)
@@ -26,11 +26,12 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
                                              const uint8_t *addr);
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-                     const uint8_t *neigh_addr, uint32_t seqno);
+                     const uint8_t *neigh_addr);
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
 struct batadv_neigh_node *
 batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
deleted file mode 100644 (file)
index ccab0bb..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "ring_buffer.h"
-
-void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
-                           uint8_t value)
-{
-       lq_recv[*lq_index] = value;
-       *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
-}
-
-uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
-{
-       const uint8_t *ptr;
-       uint16_t count = 0, i = 0, sum = 0;
-
-       ptr = lq_recv;
-
-       while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
-               if (*ptr != 0) {
-                       count++;
-                       sum += *ptr;
-               }
-
-               i++;
-               ptr++;
-       }
-
-       if (count == 0)
-               return 0;
-
-       return (uint8_t)(sum / count);
-}
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
deleted file mode 100644 (file)
index 3f92ae2..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
-#define _NET_BATMAN_ADV_RING_BUFFER_H_
-
-void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
-                           uint8_t value);
-uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]);
-
-#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
index b27a4d792d1537fe2f53a492b6f3b19992e5dbc0..2f0bd3ffe6e8588cb1bf3f75d1dbc3d6b427f331 100644 (file)
 static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
-void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
-{
-       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       unsigned long *word;
-       uint32_t i;
-       size_t word_index;
-       uint8_t *w;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
-                       word_index = hard_iface->if_num * BATADV_NUM_WORDS;
-                       word = &(orig_node->bcast_own[word_index]);
-
-                       batadv_bit_get_packet(bat_priv, word, 1, 0);
-                       w = &orig_node->bcast_own_sum[hard_iface->if_num];
-                       *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
-               }
-               rcu_read_unlock();
-       }
-}
-
 static void _batadv_update_route(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
                                 struct batadv_neigh_node *neigh_node)
@@ -256,7 +227,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
        if (unlikely(!pskb_may_pull(skb, header_len)))
                return false;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
@@ -314,7 +285,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
        icmp_packet->msg_type = BATADV_ECHO_REPLY;
        icmp_packet->header.ttl = BATADV_TTL;
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
 
 out:
@@ -362,7 +333,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
        icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
        icmp_packet->header.ttl = BATADV_TTL;
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
 
 out:
@@ -392,7 +363,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                goto out;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
@@ -439,7 +410,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        icmp_packet->header.ttl--;
 
        /* route it */
-       if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
+       if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
 
 out:
@@ -569,7 +540,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return -ENODATA;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
@@ -803,8 +774,8 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_neigh_node *neigh_node = NULL;
        struct batadv_unicast_packet *unicast_packet;
-       struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
-       int ret = NET_RX_DROP;
+       struct ethhdr *ethhdr = eth_hdr(skb);
+       int res, ret = NET_RX_DROP;
        struct sk_buff *new_skb;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -864,16 +835,19 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        /* decrement ttl */
        unicast_packet->header.ttl--;
 
-       /* network code packet if possible */
-       if (batadv_nc_skb_forward(skb, neigh_node, ethhdr)) {
-               ret = NET_RX_SUCCESS;
-       } else if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) {
-               ret = NET_RX_SUCCESS;
+       res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
 
-               /* Update stats counter */
+       /* translate transmit result into receive result */
+       if (res == NET_XMIT_SUCCESS) {
+               /* skb was transmitted and consumed */
                batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
                batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
                                   skb->len + ETH_HLEN);
+
+               ret = NET_RX_SUCCESS;
+       } else if (res == NET_XMIT_POLICED) {
+               /* skb was buffered and consumed */
+               ret = NET_RX_SUCCESS;
        }
 
 out:
@@ -1165,7 +1139,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                goto out;
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
@@ -1265,7 +1239,7 @@ int batadv_recv_vis_packet(struct sk_buff *skb,
                return NET_RX_DROP;
 
        vis_packet = (struct batadv_vis_packet *)skb->data;
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        /* not for me */
        if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
index 99eeafaba4075a37df1562e7e1b890b6a717fb71..72a29bde201022300194422a9caff389ab409731 100644 (file)
@@ -20,7 +20,6 @@
 #ifndef _NET_BATMAN_ADV_ROUTING_H_
 #define _NET_BATMAN_ADV_ROUTING_H_
 
-void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface);
 bool batadv_check_management_packet(struct sk_buff *skb,
                                    struct batadv_hard_iface *hard_iface,
                                    int header_len);
index 263cfd1ccee78dfdf66c0f102d66301b8700a46d..e9ff8d801201279eaf91f347c730dbdd9e17f13d 100644 (file)
@@ -61,7 +61,7 @@ int batadv_send_skb_packet(struct sk_buff *skb,
 
        skb_reset_mac_header(skb);
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
@@ -96,26 +96,37 @@ send_skb_err:
  * host, NULL can be passed as recv_if and no interface alternating is
  * attempted.
  *
- * Returns TRUE on success; FALSE otherwise.
+ * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
+ * NET_XMIT_POLICED if the skb is buffered for later transmit.
  */
-bool batadv_send_skb_to_orig(struct sk_buff *skb,
-                            struct batadv_orig_node *orig_node,
-                            struct batadv_hard_iface *recv_if)
+int batadv_send_skb_to_orig(struct sk_buff *skb,
+                           struct batadv_orig_node *orig_node,
+                           struct batadv_hard_iface *recv_if)
 {
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;
+       int ret = NET_XMIT_DROP;
 
        /* batadv_find_router() increases neigh_nodes refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
-               return false;
+               return ret;
 
-       /* route it */
-       batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       /* try to network code the packet, if it is received on an interface
+        * (i.e. being forwarded). If the packet originates from this node or if
+        * network coding fails, then send the packet as usual.
+        */
+       if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
+               ret = NET_XMIT_POLICED;
+       } else {
+               batadv_send_skb_packet(skb, neigh_node->if_incoming,
+                                      neigh_node->addr);
+               ret = NET_XMIT_SUCCESS;
+       }
 
        batadv_neigh_node_free_ref(neigh_node);
 
-       return true;
+       return ret;
 }
 
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
@@ -152,8 +163,6 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
 {
-       INIT_HLIST_NODE(&forw_packet->list);
-
        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
@@ -260,6 +269,9 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
                if (hard_iface->soft_iface != soft_iface)
                        continue;
 
+               if (forw_packet->num_packets >= hard_iface->num_bcasts)
+                       continue;
+
                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
@@ -271,7 +283,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
        forw_packet->num_packets++;
 
        /* if we still have some more bcasts to send */
-       if (forw_packet->num_packets < 3) {
+       if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
index 38e662f619ac6e45bebed3b982f444396ea94bd8..e7b17880fca4f46ef77783034e58ae1eeb00b641 100644 (file)
@@ -23,9 +23,9 @@
 int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const uint8_t *dst_addr);
-bool batadv_send_skb_to_orig(struct sk_buff *skb,
-                            struct batadv_orig_node *orig_node,
-                            struct batadv_hard_iface *recv_if);
+int batadv_send_skb_to_orig(struct sk_buff *skb,
+                           struct batadv_orig_node *orig_node,
+                           struct batadv_hard_iface *recv_if);
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
 int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
index 6f20d339e33adb3bab929d9bc406f3c8cebd959a..700d0b49742da54d0a1280b84c87416977d7d721 100644 (file)
@@ -154,7 +154,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
                                                    0x00, 0x00};
        unsigned int header_len = 0;
        int data_len = skb->len, ret;
-       short vid __maybe_unused = -1;
+       unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
        bool do_bcast = false;
        uint32_t seqno;
        unsigned long brd_delay = 1;
@@ -303,7 +303,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
        struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
-       short vid __maybe_unused = -1;
+       unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
        __be16 ethertype = __constant_htons(ETH_P_BATMAN);
        bool is_bcast;
 
@@ -316,7 +316,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
        skb_pull_rcsum(skb, hdr_size);
        skb_reset_mac_header(skb);
 
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
@@ -505,6 +505,7 @@ unreg_debugfs:
        batadv_debugfs_del_meshif(dev);
 free_bat_counters:
        free_percpu(bat_priv->bat_counters);
+       bat_priv->bat_counters = NULL;
 
        return ret;
 }
index 15a22efa9a679e016e8a435b8564c5460327f116..929e304dacb254c333dd3dfbb94ba8de832153c6 100644 (file)
@@ -582,10 +582,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
            (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
                goto out;
 
-       if (!rtnl_trylock()) {
-               ret = -ERESTARTSYS;
-               goto out;
-       }
+       rtnl_lock();
 
        if (status_tmp == BATADV_IF_NOT_IN_USE) {
                batadv_hardif_disable_interface(hard_iface,
index 5e89deeb9542979c72ba635fb1c18d301a9c1621..429aeef3d8b2d98e86e5e0772496d6796dc86798 100644 (file)
@@ -144,7 +144,12 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        struct batadv_tt_orig_list_entry *orig_entry;
 
        orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-       batadv_orig_node_free_ref(orig_entry->orig_node);
+
+       /* We are in an rcu callback here, therefore we cannot use
+        * batadv_orig_node_free_ref() and its call_rcu():
+        * An rcu_barrier() wouldn't wait for that to finish
+        */
+       batadv_orig_node_free_ref_now(orig_entry->orig_node);
        kfree(orig_entry);
 }
 
@@ -158,10 +163,19 @@ batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
 }
 
+/**
+ * batadv_tt_local_event - store a local TT event (ADD/DEL)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_local_entry: the TT entry involved in the event
+ * @event_flags: flags to store in the event structure
+ */
 static void batadv_tt_local_event(struct batadv_priv *bat_priv,
-                                 const uint8_t *addr, uint8_t flags)
+                                 struct batadv_tt_local_entry *tt_local_entry,
+                                 uint8_t event_flags)
 {
        struct batadv_tt_change_node *tt_change_node, *entry, *safe;
+       struct batadv_tt_common_entry *common = &tt_local_entry->common;
+       uint8_t flags = common->flags | event_flags;
        bool event_removed = false;
        bool del_op_requested, del_op_entry;
 
@@ -171,7 +185,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
                return;
 
        tt_change_node->change.flags = flags;
-       memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
+       memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
 
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
@@ -179,7 +193,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        spin_lock_bh(&bat_priv->tt.changes_list_lock);
        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
-               if (!batadv_compare_eth(entry->change.addr, addr))
+               if (!batadv_compare_eth(entry->change.addr, common->addr))
                        continue;
 
                /* DEL+ADD in the same orig interval have no effect and can be
@@ -327,7 +341,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        }
 
 add_event:
-       batadv_tt_local_event(bat_priv, addr, tt_local->common.flags);
+       batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
 
 check_roaming:
        /* Check whether it is a roaming, but don't do anything if the roaming
@@ -524,8 +538,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
                            struct batadv_tt_local_entry *tt_local_entry,
                            uint16_t flags, const char *message)
 {
-       batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
-                             tt_local_entry->common.flags | flags);
+       batadv_tt_local_event(bat_priv, tt_local_entry, flags);
 
        /* The local client has to be marked as "pending to be removed" but has
         * to be kept in the table in order to send it in a full table
@@ -579,8 +592,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        /* if this client has been added right now, it is possible to
         * immediately purge it
         */
-       batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
-                             curr_flags | BATADV_TT_CLIENT_DEL);
+       batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
        hlist_del_rcu(&tt_local_entry->common.hash_entry);
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
@@ -786,10 +798,25 @@ out:
                batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
 
-/* caller must hold orig_node refcount */
+/**
+ * batadv_tt_global_add - add a new TT global entry or update an existing one
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator announcing the client
+ * @tt_addr: the mac address of the non-mesh client
+ * @flags: TT flags that have to be set for this non-mesh client
+ * @ttvn: the tt version number ever announcing this non-mesh client
+ *
+ * Add a new TT global entry for the given originator. If the entry already
+ * exists add a new reference to the given originator (a global entry can have
+ * references to multiple originators) and adjust the flags attribute to reflect
+ * the function argument.
+ * If a TT local entry exists for this non-mesh client remove it.
+ *
+ * The caller must hold orig_node refcount.
+ */
 int batadv_tt_global_add(struct batadv_priv *bat_priv,
                         struct batadv_orig_node *orig_node,
-                        const unsigned char *tt_addr, uint8_t flags,
+                        const unsigned char *tt_addr, uint16_t flags,
                         uint8_t ttvn)
 {
        struct batadv_tt_global_entry *tt_global_entry;
@@ -1595,11 +1622,11 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
        len = tt_query_size + tt_len;
-       skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
        if (!skb)
                goto out;
 
-       skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+       skb_reserve(skb, ETH_HLEN);
        tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
        tt_response->ttvn = ttvn;
 
@@ -1660,11 +1687,11 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
        if (!tt_req_node)
                goto out;
 
-       skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(NULL, sizeof(*tt_request) + ETH_HLEN);
        if (!skb)
                goto out;
 
-       skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+       skb_reserve(skb, ETH_HLEN);
 
        tt_req_len = sizeof(*tt_request);
        tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
@@ -1686,7 +1713,7 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
 
-       if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL) != NET_XMIT_DROP)
                ret = 0;
 
 out:
@@ -1710,7 +1737,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
        struct batadv_orig_node *req_dst_orig_node;
        struct batadv_orig_node *res_dst_orig_node = NULL;
        uint8_t orig_ttvn, req_ttvn, ttvn;
-       int ret = false;
+       int res, ret = false;
        unsigned char *tt_buff;
        bool full_table;
        uint16_t tt_len, tt_tot;
@@ -1757,11 +1784,11 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
                len = sizeof(*tt_response) + tt_len;
-               skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
                if (!skb)
                        goto unlock;
 
-               skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+               skb_reserve(skb, ETH_HLEN);
                packet_pos = skb_put(skb, len);
                tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
@@ -1805,8 +1832,10 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-       if (batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL))
+       res = batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL);
+       if (res != NET_XMIT_DROP)
                ret = true;
+
        goto out;
 
 unlock:
@@ -1873,11 +1902,11 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
                len = sizeof(*tt_response) + tt_len;
-               skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
+               skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
                if (!skb)
                        goto unlock;
 
-               skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+               skb_reserve(skb, ETH_HLEN);
                packet_pos = skb_put(skb, len);
                tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
@@ -1920,7 +1949,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = true;
        goto out;
 
@@ -2207,11 +2236,11 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
        if (!batadv_tt_check_roam_count(bat_priv, client))
                goto out;
 
-       skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN + NET_IP_ALIGN);
+       skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
        if (!skb)
                goto out;
 
-       skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+       skb_reserve(skb, ETH_HLEN);
 
        roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
 
@@ -2233,7 +2262,7 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = 0;
 
 out:
index ab8e683b402f0686a6e1ffe4168f99961c898bc4..659a3bb759ce87087c4d5697239f4724c815971c 100644 (file)
@@ -33,7 +33,7 @@ void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
                               const unsigned char *tt_buff, int tt_buff_len);
 int batadv_tt_global_add(struct batadv_priv *bat_priv,
                         struct batadv_orig_node *orig_node,
-                        const unsigned char *addr, uint8_t flags,
+                        const unsigned char *addr, uint16_t flags,
                         uint8_t ttvn);
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
index aba8364c368991935ab5f9899266a6662f925618..b2c94e1393191e00165e42e117ded3dd2374f70f 100644 (file)
@@ -61,6 +61,7 @@ struct batadv_hard_iface_bat_iv {
  * @if_status: status of the interface for batman-adv
  * @net_dev: pointer to the net_device
  * @frag_seqno: last fragment sequence number sent by this interface
+ * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
  * @hardif_obj: kobject of the per interface sysfs "mesh" directory
  * @refcount: number of contexts the object is used
  * @batman_adv_ptype: packet type describing packets that should be processed by
@@ -76,6 +77,7 @@ struct batadv_hard_iface {
        char if_status;
        struct net_device *net_dev;
        atomic_t frag_seqno;
+       uint8_t num_bcasts;
        struct kobject *hardif_obj;
        atomic_t refcount;
        struct packet_type batman_adv_ptype;
@@ -640,7 +642,7 @@ struct batadv_socket_packet {
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bla_backbone_gw {
        uint8_t orig[ETH_ALEN];
-       short vid;
+       unsigned short vid;
        struct hlist_node hash_entry;
        struct batadv_priv *bat_priv;
        unsigned long lasttime;
@@ -663,7 +665,7 @@ struct batadv_bla_backbone_gw {
  */
 struct batadv_bla_claim {
        uint8_t addr[ETH_ALEN];
-       short vid;
+       unsigned short vid;
        struct batadv_bla_backbone_gw *backbone_gw;
        unsigned long lasttime;
        struct hlist_node hash_entry;
index 0bb3b5982f94c9480741ad224594e7b0a9143e0f..dc8b5d4dd636d3d5ed982a5d93c2783f203ac4f5 100644 (file)
@@ -464,7 +464,7 @@ find_router:
                goto out;
        }
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = 0;
 
 out:
index 1625e5793a895d02aec97966f901ccfb8a29e4ab..4983340f1943c5a0cc491785bf835981e7c44a1a 100644 (file)
@@ -392,12 +392,12 @@ batadv_add_packet(struct batadv_priv *bat_priv,
                return NULL;
 
        len = sizeof(*packet) + vis_info_len;
-       info->skb_packet = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
+       info->skb_packet = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
        if (!info->skb_packet) {
                kfree(info);
                return NULL;
        }
-       skb_reserve(info->skb_packet, ETH_HLEN + NET_IP_ALIGN);
+       skb_reserve(info->skb_packet, ETH_HLEN);
        packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
 
        kref_init(&info->refcount);
@@ -697,7 +697,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
        struct batadv_orig_node *orig_node;
        struct batadv_vis_packet *packet;
        struct sk_buff *skb;
-       uint32_t i;
+       uint32_t i, res;
 
 
        packet = (struct batadv_vis_packet *)info->skb_packet->data;
@@ -724,7 +724,8 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
                        if (!skb)
                                continue;
 
-                       if (!batadv_send_skb_to_orig(skb, orig_node, NULL))
+                       res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+                       if (res == NET_XMIT_DROP)
                                kfree_skb(skb);
                }
                rcu_read_unlock();
@@ -748,7 +749,7 @@ static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
        if (!skb)
                goto out;
 
-       if (!batadv_send_skb_to_orig(skb, orig_node, NULL))
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
                kfree_skb(skb);
 
 out:
@@ -854,13 +855,13 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        if (!bat_priv->vis.my_info)
                goto err;
 
-       len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE;
-       len += ETH_HLEN + NET_IP_ALIGN;
-       bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
+       len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
+       bat_priv->vis.my_info->skb_packet = netdev_alloc_skb_ip_align(NULL,
+                                                                     len);
        if (!bat_priv->vis.my_info->skb_packet)
                goto free_info;
 
-       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN + NET_IP_ALIGN);
+       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
        tmp_skb = bat_priv->vis.my_info->skb_packet;
        packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
index 967312803e4130f4e27712daed6d9dd9e2a1a9d6..2ef66781fedb3a9211448fa3db73eedab2c7d8a2 100644 (file)
@@ -22,6 +22,9 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
+#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
+                        NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
+
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -55,10 +58,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_pull(skb, ETH_HLEN);
 
        if (is_broadcast_ether_addr(dest))
-               br_flood_deliver(br, skb);
+               br_flood_deliver(br, skb, false);
        else if (is_multicast_ether_addr(dest)) {
                if (unlikely(netpoll_tx_running(dev))) {
-                       br_flood_deliver(br, skb);
+                       br_flood_deliver(br, skb, false);
                        goto out;
                }
                if (br_multicast_rcv(br, NULL, skb)) {
@@ -70,11 +73,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
                        br_multicast_deliver(mdst, skb);
                else
-                       br_flood_deliver(br, skb);
+                       br_flood_deliver(br, skb, false);
        } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
                br_deliver(dst->dst, skb);
        else
-               br_flood_deliver(br, skb);
+               br_flood_deliver(br, skb, true);
 
 out:
        rcu_read_unlock();
@@ -346,12 +349,10 @@ void br_dev_setup(struct net_device *dev)
        dev->tx_queue_len = 0;
        dev->priv_flags = IFF_EBRIDGE;
 
-       dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-                       NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
-                       NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX;
-       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
-                          NETIF_F_HW_VLAN_CTAG_TX;
+       dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
+                       NETIF_F_HW_VLAN_CTAG_TX;
+       dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
+       dev->vlan_features = COMMON_FEATURES;
 
        br->dev = dev;
        spin_lock_init(&br->lock);
index 092b20e4ee4c3fc3a054477b67d2273263adf543..4b81b147178987f6ea29ee6efba261cd738102b9 100644 (file)
@@ -174,7 +174,8 @@ out:
 static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                     struct sk_buff *skb0,
                     void (*__packet_hook)(const struct net_bridge_port *p,
-                                          struct sk_buff *skb))
+                                          struct sk_buff *skb),
+                    bool unicast)
 {
        struct net_bridge_port *p;
        struct net_bridge_port *prev;
@@ -182,6 +183,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
        prev = NULL;
 
        list_for_each_entry_rcu(p, &br->port_list, list) {
+               /* Do not flood unicast traffic to ports that turn it off */
+               if (unicast && !(p->flags & BR_FLOOD))
+                       continue;
                prev = maybe_deliver(prev, p, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;
@@ -203,16 +207,16 @@ out:
 
 
 /* called with rcu_read_lock */
-void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
 {
-       br_flood(br, skb, NULL, __br_deliver);
+       br_flood(br, skb, NULL, __br_deliver, unicast);
 }
 
 /* called under bridge lock */
 void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-                     struct sk_buff *skb2)
+                     struct sk_buff *skb2, bool unicast)
 {
-       br_flood(br, skb, skb2, __br_forward);
+       br_flood(br, skb, skb2, __br_forward, unicast);
 }
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
index 4cdba60926ffc91c52c031793eb562fb38a84a4a..5623be6b9ecda3f77d62cb2db7fc5e9aef2bd65c 100644 (file)
@@ -221,7 +221,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
        p->path_cost = port_cost(dev);
        p->priority = 0x8000 >> BR_PORT_BITS;
        p->port_no = index;
-       p->flags = 0;
+       p->flags = BR_LEARNING | BR_FLOOD;
        br_init_port(p);
        p->state = BR_STATE_DISABLED;
        br_stp_port_timer_init(p);
index 828e2bcc1f525570809b652c98e1c011b2850940..1b8b8b824cd766b05665e1d89f0e1394abdfbe2f 100644 (file)
@@ -65,6 +65,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
        struct net_bridge_fdb_entry *dst;
        struct net_bridge_mdb_entry *mdst;
        struct sk_buff *skb2;
+       bool unicast = true;
        u16 vid = 0;
 
        if (!p || p->state == BR_STATE_DISABLED)
@@ -75,7 +76,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
        /* insert into forwarding database after filtering to avoid spoofing */
        br = p->br;
-       br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+       if (p->flags & BR_LEARNING)
+               br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
 
        if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
            br_multicast_rcv(br, p, skb))
@@ -94,9 +96,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
        dst = NULL;
 
-       if (is_broadcast_ether_addr(dest))
+       if (is_broadcast_ether_addr(dest)) {
                skb2 = skb;
-       else if (is_multicast_ether_addr(dest)) {
+               unicast = false;
+       } else if (is_multicast_ether_addr(dest)) {
                mdst = br_mdb_get(br, skb, vid);
                if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
                        if ((mdst && mdst->mglist) ||
@@ -109,6 +112,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
                } else
                        skb2 = skb;
 
+               unicast = false;
                br->dev->stats.multicast++;
        } else if ((dst = __br_fdb_get(br, dest, vid)) &&
                        dst->is_local) {
@@ -122,7 +126,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
                        dst->used = jiffies;
                        br_forward(dst->dst, skb, skb2);
                } else
-                       br_flood_forward(br, skb, skb2);
+                       br_flood_forward(br, skb, skb2, unicast);
        }
 
        if (skb2)
@@ -142,7 +146,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
        u16 vid = 0;
 
        br_vlan_get_tag(skb, &vid);
-       br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+       if (p->flags & BR_LEARNING)
+               br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
        return 0;        /* process further */
 }
 
index 81f2389f78eb884e80cafbd624d3029aa48e3ed1..81befac015e1753339cd0bea01d17ae8faee1914 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
+#include <linux/inetdevice.h>
 #include <net/ip.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -381,7 +382,8 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
        iph->frag_off = htons(IP_DF);
        iph->ttl = 1;
        iph->protocol = IPPROTO_IGMP;
-       iph->saddr = 0;
+       iph->saddr = br->multicast_query_use_ifaddr ?
+                    inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
        iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
        ((u8 *)&iph[1])[0] = IPOPT_RA;
        ((u8 *)&iph[1])[1] = 4;
@@ -465,8 +467,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        skb_set_transport_header(skb, skb->len);
        mldq = (struct mld_msg *) icmp6_hdr(skb);
 
-       interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
-                                         br->multicast_query_response_interval;
+       interval = ipv6_addr_any(group) ?
+                       br->multicast_query_response_interval :
+                       br->multicast_last_member_interval;
 
        mldq->mld_type = ICMPV6_MGM_QUERY;
        mldq->mld_code = 0;
@@ -615,8 +618,6 @@ rehash:
 
        mp->br = br;
        mp->addr = *group;
-       setup_timer(&mp->timer, br_multicast_group_expired,
-                   (unsigned long)mp);
 
        hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
        mdb->size++;
@@ -654,7 +655,6 @@ static int br_multicast_add_group(struct net_bridge *br,
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
-       unsigned long now = jiffies;
        int err;
 
        spin_lock(&br->multicast_lock);
@@ -669,7 +669,6 @@ static int br_multicast_add_group(struct net_bridge *br,
 
        if (!port) {
                mp->mglist = true;
-               mod_timer(&mp->timer, now + br->multicast_membership_interval);
                goto out;
        }
 
@@ -677,7 +676,7 @@ static int br_multicast_add_group(struct net_bridge *br,
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port == port)
-                       goto found;
+                       goto out;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }
@@ -688,8 +687,6 @@ static int br_multicast_add_group(struct net_bridge *br,
        rcu_assign_pointer(*pp, p);
        br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 
-found:
-       mod_timer(&p->timer, now + br->multicast_membership_interval);
 out:
        err = 0;
 
@@ -1015,7 +1012,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 #endif
 
 /*
- * Add port to rotuer_list
+ * Add port to router_list
  *  list is maintained ordered by pointer value
  *  and locked by br->multicast_lock and RCU
  */
@@ -1129,6 +1126,10 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
+       setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
+       mod_timer(&mp->timer, now + br->multicast_membership_interval);
+       mp->timer_armed = true;
+
        max_delay *= br->multicast_last_member_count;
 
        if (mp->mglist &&
@@ -1203,6 +1204,10 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
+       setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
+       mod_timer(&mp->timer, now + br->multicast_membership_interval);
+       mp->timer_armed = true;
+
        max_delay *= br->multicast_last_member_count;
        if (mp->mglist &&
            (timer_pending(&mp->timer) ?
@@ -1246,6 +1251,32 @@ static void br_multicast_leave_group(struct net_bridge *br,
        if (!mp)
                goto out;
 
+       if (br->multicast_querier &&
+           !timer_pending(&br->multicast_querier_timer)) {
+               __br_multicast_send_query(br, port, &mp->addr);
+
+               time = jiffies + br->multicast_last_member_count *
+                                br->multicast_last_member_interval;
+               mod_timer(port ? &port->multicast_query_timer :
+                                &br->multicast_query_timer, time);
+
+               for (p = mlock_dereference(mp->ports, br);
+                    p != NULL;
+                    p = mlock_dereference(p->next, br)) {
+                       if (p->port != port)
+                               continue;
+
+                       if (!hlist_unhashed(&p->mglist) &&
+                           (timer_pending(&p->timer) ?
+                            time_after(p->timer.expires, time) :
+                            try_to_del_timer_sync(&p->timer) >= 0)) {
+                               mod_timer(&p->timer, time);
+                       }
+
+                       break;
+               }
+       }
+
        if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
                struct net_bridge_port_group __rcu **pp;
 
@@ -1261,7 +1292,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
                        br_mdb_notify(br->dev, port, group, RTM_DELMDB);
 
-                       if (!mp->ports && !mp->mglist &&
+                       if (!mp->ports && !mp->mglist && mp->timer_armed &&
                            netif_running(br->dev))
                                mod_timer(&mp->timer, jiffies);
                }
@@ -1273,30 +1304,12 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     br->multicast_last_member_interval;
 
        if (!port) {
-               if (mp->mglist &&
+               if (mp->mglist && mp->timer_armed &&
                    (timer_pending(&mp->timer) ?
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
                        mod_timer(&mp->timer, time);
                }
-
-               goto out;
-       }
-
-       for (p = mlock_dereference(mp->ports, br);
-            p != NULL;
-            p = mlock_dereference(p->next, br)) {
-               if (p->port != port)
-                       continue;
-
-               if (!hlist_unhashed(&p->mglist) &&
-                   (timer_pending(&p->timer) ?
-                    time_after(p->timer.expires, time) :
-                    try_to_del_timer_sync(&p->timer) >= 0)) {
-                       mod_timer(&p->timer, time);
-               }
-
-               break;
        }
 
 out:
@@ -1618,6 +1631,7 @@ void br_multicast_init(struct net_bridge *br)
 
        br->multicast_router = 1;
        br->multicast_querier = 0;
+       br->multicast_query_use_ifaddr = 0;
        br->multicast_last_member_count = 2;
        br->multicast_startup_query_count = 2;
 
@@ -1671,6 +1685,7 @@ void br_multicast_stop(struct net_bridge *br)
                hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
                                          hlist[ver]) {
                        del_timer(&mp->timer);
+                       mp->timer_armed = false;
                        call_rcu_bh(&mp->rcu, br_multicast_free_group);
                }
        }
index 1ed75bfd8d1d72f6ae89bef20191eb824d3aac6c..f87736270eaa875ba0060048a38cfabacfd29444 100644 (file)
@@ -992,7 +992,7 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
 
 #ifdef CONFIG_SYSCTL
 static
-int brnf_sysctl_call_tables(ctl_table * ctl, int write,
+int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
                            void __user * buffer, size_t * lenp, loff_t * ppos)
 {
        int ret;
@@ -1004,7 +1004,7 @@ int brnf_sysctl_call_tables(ctl_table * ctl, int write,
        return ret;
 }
 
-static ctl_table brnf_table[] = {
+static struct ctl_table brnf_table[] = {
        {
                .procname       = "bridge-nf-call-arptables",
                .data           = &brnf_call_arptables,
index 8e3abf564798a9044feb2c6e668fda46f30f2dae..1fc30abd3a523912376ce01fcae932f3b6b8c746 100644 (file)
@@ -30,6 +30,8 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(1)     /* IFLA_BRPORT_GUARD */
                + nla_total_size(1)     /* IFLA_BRPORT_PROTECT */
                + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
+               + nla_total_size(1)     /* IFLA_BRPORT_LEARNING */
+               + nla_total_size(1)     /* IFLA_BRPORT_UNICAST_FLOOD */
                + 0;
 }
 
@@ -56,7 +58,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
            nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
            nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
            nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
-           nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)))
+           nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
+           nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
+           nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)))
                return -EMSGSIZE;
 
        return 0;
@@ -281,6 +285,8 @@ static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_MODE]      = { .type = NLA_U8 },
        [IFLA_BRPORT_GUARD]     = { .type = NLA_U8 },
        [IFLA_BRPORT_PROTECT]   = { .type = NLA_U8 },
+       [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
+       [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -328,6 +334,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
        br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
        br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
        br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
+       br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
+       br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
 
        if (tb[IFLA_BRPORT_COST]) {
                err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
index 1644b3e1f947a554f944b3e6cf655a0346d7dc66..3a3f371b28415c110e4db6a21069f9153dafe904 100644 (file)
@@ -31,7 +31,7 @@ struct notifier_block br_device_notifier = {
  */
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net_bridge_port *p;
        struct net_bridge *br;
        bool changed_addr;
index d2c043a857b6a0fd3bfd208f018179b96c42dacd..3be89b3ce17b5a314215609790cadec6e5453727 100644 (file)
@@ -112,6 +112,7 @@ struct net_bridge_mdb_entry
        struct timer_list               timer;
        struct br_ip                    addr;
        bool                            mglist;
+       bool                            timer_armed;
 };
 
 struct net_bridge_mdb_htable
@@ -157,6 +158,8 @@ struct net_bridge_port
 #define BR_ROOT_BLOCK          0x00000004
 #define BR_MULTICAST_FAST_LEAVE        0x00000008
 #define BR_ADMIN_COST          0x00000010
+#define BR_LEARNING            0x00000020
+#define BR_FLOOD               0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        u32                             multicast_startup_queries_sent;
@@ -249,6 +252,7 @@ struct net_bridge
 
        u8                              multicast_disabled:1;
        u8                              multicast_querier:1;
+       u8                              multicast_query_use_ifaddr:1;
 
        u32                             hash_elasticity;
        u32                             hash_max;
@@ -411,9 +415,10 @@ extern int br_dev_queue_push_xmit(struct sk_buff *skb);
 extern void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, struct sk_buff *skb0);
 extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb,
+                            bool unicast);
 extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-                            struct sk_buff *skb2);
+                            struct sk_buff *skb2, bool unicast);
 
 /* br_if.c */
 extern void br_port_carrier_check(struct net_bridge_port *p);
index 8baa9c08e1a4b1442d4d99e4d448f25b03685e47..394bb96b608707aa1ab943f66bc52fa173859706 100644 (file)
@@ -375,6 +375,31 @@ static ssize_t store_multicast_snooping(struct device *d,
 static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
                   show_multicast_snooping, store_multicast_snooping);
 
+static ssize_t show_multicast_query_use_ifaddr(struct device *d,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct net_bridge *br = to_bridge(d);
+       return sprintf(buf, "%d\n", br->multicast_query_use_ifaddr);
+}
+
+static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val)
+{
+       br->multicast_query_use_ifaddr = !!val;
+       return 0;
+}
+
+static ssize_t
+store_multicast_query_use_ifaddr(struct device *d,
+                                struct device_attribute *attr,
+                                const char *buf, size_t len)
+{
+       return store_bridge_parm(d, buf, len, set_query_use_ifaddr);
+}
+static DEVICE_ATTR(multicast_query_use_ifaddr, S_IRUGO | S_IWUSR,
+                  show_multicast_query_use_ifaddr,
+                  store_multicast_query_use_ifaddr);
+
 static ssize_t show_multicast_querier(struct device *d,
                                      struct device_attribute *attr,
                                      char *buf)
@@ -734,6 +759,7 @@ static struct attribute *bridge_attrs[] = {
        &dev_attr_multicast_router.attr,
        &dev_attr_multicast_snooping.attr,
        &dev_attr_multicast_querier.attr,
+       &dev_attr_multicast_query_use_ifaddr.attr,
        &dev_attr_hash_elasticity.attr,
        &dev_attr_hash_max.attr,
        &dev_attr_multicast_last_member_count.attr,
index a1ef1b6e14dc0df71bc72bf6a5ca18bdc8d255cc..2a2cdb756d51e1944cb707b2945ba034ab22fe71 100644 (file)
@@ -158,6 +158,8 @@ static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
 BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
 BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
 BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
+BRPORT_ATTR_FLAG(learning, BR_LEARNING);
+BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -195,6 +197,8 @@ static const struct brport_attribute *brport_attrs[] = {
        &brport_attr_hairpin_mode,
        &brport_attr_bpdu_guard,
        &brport_attr_root_block,
+       &brport_attr_learning,
+       &brport_attr_unicast_flood,
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        &brport_attr_multicast_router,
        &brport_attr_multicast_fast_leave,
index 9878eb8204c524334be49d0445cae9c29927ca0d..19c37a4929bcd7619d43573285d5d33dfa3b865f 100644 (file)
@@ -72,13 +72,12 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
 }
 
 static void
-ebt_log_packet(u_int8_t pf, unsigned int hooknum,
-   const struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, const struct nf_loginfo *loginfo,
-   const char *prefix)
+ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
+              const struct sk_buff *skb, const struct net_device *in,
+              const struct net_device *out, const struct nf_loginfo *loginfo,
+              const char *prefix)
 {
        unsigned int bitmask;
-       struct net *net = dev_net(in ? in : out);
 
        /* FIXME: Disabled from containers until syslog ns is supported */
        if (!net_eq(net, &init_net))
@@ -191,7 +190,7 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
                nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
                              par->in, par->out, &li, "%s", info->prefix);
        else
-               ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in,
+               ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in,
                               par->out, &li, info->prefix);
        return EBT_CONTINUE;
 }
index fc1905c514178a1ae439060588e06a7c5d26e446..518093802d1d640f3642e997b1481279fad9b68b 100644 (file)
@@ -131,14 +131,16 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
        return skb;
 }
 
-static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
-   const struct net_device *in, const struct net_device *out,
-   const struct ebt_ulog_info *uloginfo, const char *prefix)
+static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
+                           const struct sk_buff *skb,
+                           const struct net_device *in,
+                           const struct net_device *out,
+                           const struct ebt_ulog_info *uloginfo,
+                           const char *prefix)
 {
        ebt_ulog_packet_msg_t *pm;
        size_t size, copy_len;
        struct nlmsghdr *nlh;
-       struct net *net = dev_net(in ? in : out);
        struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
        unsigned int group = uloginfo->nlgroup;
        ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
@@ -233,7 +235,7 @@ unlock:
 }
 
 /* this function is registered with the netfilter core */
-static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
+static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
    const struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, const struct nf_loginfo *li,
    const char *prefix)
@@ -252,13 +254,15 @@ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
        }
 
-       ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+       ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
 }
 
 static unsigned int
 ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
+       struct net *net = dev_net(par->in ? par->in : par->out);
+
+       ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
                        par->targinfo, NULL);
        return EBT_CONTINUE;
 }
@@ -267,6 +271,12 @@ static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
 {
        struct ebt_ulog_info *uloginfo = par->targinfo;
 
+       if (!par->net->xt.ebt_ulog_warn_deprecated) {
+               pr_info("ebt_ulog is deprecated and it will be removed soon, "
+                       "use ebt_nflog instead\n");
+               par->net->xt.ebt_ulog_warn_deprecated = true;
+       }
+
        if (uloginfo->nlgroup > 31)
                return -EINVAL;
 
index 3d110c4fc7870dda1d6efcb00f9169f819dc6526..ac7802428384518558bb01d97478346694f62d29 100644 (file)
@@ -1339,7 +1339,7 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
 
        /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
           long. Copy 29 bytes and fill remaining bytes with zeroes. */
-       strncpy(name, m->u.match->name, sizeof(name));
+       strlcpy(name, m->u.match->name, sizeof(name));
        if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                return -EFAULT;
        return 0;
@@ -1351,7 +1351,7 @@ static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
        char __user *hlp = ubase + ((char *)w - base);
        char name[EBT_FUNCTION_MAXNAMELEN] = {};
 
-       strncpy(name, w->u.watcher->name, sizeof(name));
+       strlcpy(name, w->u.watcher->name, sizeof(name));
        if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
                return -EFAULT;
        return 0;
@@ -1377,7 +1377,7 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
        ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
        if (ret != 0)
                return ret;
-       strncpy(name, t->u.target->name, sizeof(name));
+       strlcpy(name, t->u.target->name, sizeof(name));
        if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                return -EFAULT;
        return 0;
index 1f9ece1a9c344655708c7d0e015ed0a46dd56a03..4dca159435cfe17dcc09fadccfebee7ca49b1075 100644 (file)
@@ -352,9 +352,9 @@ EXPORT_SYMBOL(caif_enroll_dev);
 
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
-                             void *arg)
+                             void *ptr)
 {
-       struct net_device *dev = arg;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
        struct cfcnfg *cfg;
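Editor's note: this is the first of many identical conversions in this merge: netdevice notifier callbacks no longer receive the struct net_device directly, but a struct netdev_notifier_info that is unwrapped with netdev_notifier_info_to_dev(). A hedged sketch of a callback using the new convention (all names hypothetical):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
	/* ptr is now a struct netdev_notifier_info *, not the device itself */
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("example: %s is up\n", dev->name);
		break;
	case NETDEV_UNREGISTER:
		pr_info("example: %s is going away\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};
/* registered as before with register_netdevice_notifier(&example_notifier) */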
index 942e00a425fd9b015118cb7a0982209c1989003b..75ed04b78fa4090f69329cccd0cebd04bc516290 100644 (file)
@@ -121,9 +121,9 @@ static struct packet_type caif_usb_type __read_mostly = {
 };
 
 static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
-                               void *arg)
+                               void *ptr)
 {
-       struct net_device *dev = arg;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct caif_dev_common common;
        struct cflayer *layer, *link_support;
        struct usbnet *usbnet;
index c4e50852c9f4aa8e195ed57e711890210cea1e1c..3ab8dd2e12828fea31d97b59dbeb3fe7521e953b 100644 (file)
@@ -794,9 +794,9 @@ EXPORT_SYMBOL(can_proto_unregister);
  * af_can notifier to create/remove CAN netdevice specific structs
  */
 static int can_notifier(struct notifier_block *nb, unsigned long msg,
-                       void *data)
+                       void *ptr)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dev_rcv_lists *d;
 
        if (!net_eq(dev_net(dev), &init_net))
index 8f113e6ff32750d3809c3ab38117d6ad1ebc8f38..46f20bfafc0ed510421cf743ad3674efce803018 100644 (file)
@@ -1350,9 +1350,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
  * notification handler for netdevice status changes
  */
 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
-                       void *data)
+                       void *ptr)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
        struct sock *sk = &bo->sk;
        struct bcm_op *op;
index 3ee690e8c7d32354a525ad398291b7b7c5155215..2f291f961a170018f1464fb50f4faa272887a50d 100644 (file)
@@ -445,9 +445,9 @@ static inline void cgw_unregister_filter(struct cgw_job *gwj)
 }
 
 static int cgw_notifier(struct notifier_block *nb,
-                       unsigned long msg, void *data)
+                       unsigned long msg, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
index 1085e65f848eab9a3a634ad16b37d70bd80830b5..641e1c895123ac114330635c43d0b2ed778c2ae7 100644 (file)
@@ -239,9 +239,9 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
 }
 
 static int raw_notifier(struct notifier_block *nb,
-                       unsigned long msg, void *data)
+                       unsigned long msg, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
        struct sock *sk = &ro->sk;
 
index a3395fdfbd4f6345cdf3bd3a12ebdc1d7f26ecbb..3a246a6cab473496e58275cbb367485acbbda4d8 100644 (file)
@@ -1204,6 +1204,7 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
        mutex_lock(&osdc->request_mutex);
        if (req->r_linger) {
                __unregister_linger_request(osdc, req);
+               req->r_linger = 0;
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
@@ -1674,13 +1675,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
                __register_request(osdc, req);
                __unregister_linger_request(osdc, req);
        }
+       reset_changed_osds(osdc);
        mutex_unlock(&osdc->request_mutex);
 
        if (needmap) {
                dout("%d requests for down osds, need new map\n", needmap);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        }
-       reset_changed_osds(osdc);
 }
 
 
@@ -2120,7 +2121,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __register_request(osdc, req);
-       WARN_ON(req->r_sent);
+       req->r_sent = 0;
+       req->r_got_reply = 0;
+       req->r_completed = 0;
        rc = __map_request(osdc, req, 0);
        if (rc < 0) {
                if (nofail) {
index 79ae884850015a99a1e43c4b2fc228670c3779b9..f0a1ba6c8086acc65a87e519fca48853a3bd091e 100644 (file)
@@ -734,19 +734,25 @@ static unsigned char nas[21] = {
 
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
 {
-       return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
                                    unsigned int vlen, unsigned int flags)
 {
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
        return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                              flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
 {
-       return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
@@ -768,6 +774,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
        int datagrams;
        struct timespec ktspec;
 
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+
        if (COMPAT_USE_64BIT_TIME)
                return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                                      flags | MSG_CMSG_COMPAT,
index b71423db77851eed9bd91f06a6ac0630f967a3e5..9cbaba98ce4cb96f69c683ac5afbaf2b8593ace8 100644 (file)
@@ -56,6 +56,7 @@
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <trace/events/skb.h>
+#include <net/ll_poll.h>
 
 /*
  *     Is a socket 'connection oriented' ?
@@ -207,6 +208,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                }
                spin_unlock_irqrestore(&queue->lock, cpu_flags);
 
+               if (sk_valid_ll(sk) && sk_poll_ll(sk, flags & MSG_DONTWAIT))
+                       continue;
+
                /* User doesn't want to wait */
                error = -EAGAIN;
                if (!timeo)
index fc1e289397f5895f3d2191ee78c9b450cc33e5cf..370354a9c5f6926e977ce4374541bc4b6e2e5dac 100644 (file)
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/static_key.h>
+#include <linux/hashtable.h>
+#include <linux/vmalloc.h>
 
 #include "net-sysfs.h"
 
@@ -166,6 +168,12 @@ static struct list_head offload_base __read_mostly;
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
+/* protects napi_hash addition/deletion and napi_gen_id */
+static DEFINE_SPINLOCK(napi_hash_lock);
+
+static unsigned int napi_gen_id;
+static DEFINE_HASHTABLE(napi_hash, 8);
+
 seqcount_t devnet_rename_seq;
 
 static inline void dev_base_seq_inc(struct net *net)
@@ -1198,9 +1206,7 @@ static int __dev_open(struct net_device *dev)
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
-       ret = netpoll_rx_disable(dev);
-       if (ret)
-               return ret;
+       netpoll_rx_disable(dev);
 
        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
@@ -1309,9 +1315,7 @@ static int __dev_close(struct net_device *dev)
        LIST_HEAD(single);
 
        /* Temporarily disable netpoll until the interface is down */
-       retval = netpoll_rx_disable(dev);
-       if (retval)
-               return retval;
+       netpoll_rx_disable(dev);
 
        list_add(&dev->unreg_list, &single);
        retval = __dev_close_many(&single);
@@ -1353,14 +1357,11 @@ static int dev_close_many(struct list_head *head)
  */
 int dev_close(struct net_device *dev)
 {
-       int ret = 0;
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);
 
                /* Block netpoll rx while the interface is going down */
-               ret = netpoll_rx_disable(dev);
-               if (ret)
-                       return ret;
+               netpoll_rx_disable(dev);
 
                list_add(&dev->unreg_list, &single);
                dev_close_many(&single);
@@ -1368,7 +1369,7 @@ int dev_close(struct net_device *dev)
 
                netpoll_rx_enable(dev);
        }
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(dev_close);
 
@@ -1398,6 +1399,14 @@ void dev_disable_lro(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_disable_lro);
 
+static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
+                                  struct net_device *dev)
+{
+       struct netdev_notifier_info info;
+
+       netdev_notifier_info_init(&info, dev);
+       return nb->notifier_call(nb, val, &info);
+}
 
 static int dev_boot_phase = 1;
 
@@ -1430,7 +1439,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
-                       err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
+                       err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;
@@ -1438,7 +1447,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
                        if (!(dev->flags & IFF_UP))
                                continue;
 
-                       nb->notifier_call(nb, NETDEV_UP, dev);
+                       call_netdevice_notifier(nb, NETDEV_UP, dev);
                }
        }
 
@@ -1454,10 +1463,11 @@ rollback:
                                goto outroll;
 
                        if (dev->flags & IFF_UP) {
-                               nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
-                               nb->notifier_call(nb, NETDEV_DOWN, dev);
+                               call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
+                                                       dev);
+                               call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
-                       nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+                       call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }
 
@@ -1495,10 +1505,11 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev->flags & IFF_UP) {
-                               nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
-                               nb->notifier_call(nb, NETDEV_DOWN, dev);
+                               call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
+                                                       dev);
+                               call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
-                       nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+                       call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }
 unlock:
@@ -1507,6 +1518,25 @@ unlock:
 }
 EXPORT_SYMBOL(unregister_netdevice_notifier);
 
+/**
+ *     call_netdevice_notifiers_info - call all network notifier blocks
+ *     @val: value passed unmodified to notifier function
+ *     @dev: net_device pointer passed unmodified to notifier function
+ *     @info: notifier information data
+ *
+ *     Call all network notifier blocks.  Parameters and return value
+ *     are as for raw_notifier_call_chain().
+ */
+
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+                                 struct netdev_notifier_info *info)
+{
+       ASSERT_RTNL();
+       netdev_notifier_info_init(info, dev);
+       return raw_notifier_call_chain(&netdev_chain, val, info);
+}
+EXPORT_SYMBOL(call_netdevice_notifiers_info);
+
 /**
  *     call_netdevice_notifiers - call all network notifier blocks
  *      @val: value passed unmodified to notifier function
@@ -1518,8 +1548,9 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
 
 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 {
-       ASSERT_RTNL();
-       return raw_notifier_call_chain(&netdev_chain, val, dev);
+       struct netdev_notifier_info info;
+
+       return call_netdevice_notifiers_info(val, dev, &info);
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
@@ -1621,23 +1652,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
                }
        }
 
-       skb_orphan(skb);
-
        if (unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
-       skb->skb_iif = 0;
-       skb->dev = dev;
-       skb_dst_drop(skb);
-       skb->tstamp.tv64 = 0;
-       skb->pkt_type = PACKET_HOST;
+       skb_scrub_packet(skb);
        skb->protocol = eth_type_trans(skb, dev);
-       skb->mark = 0;
-       secpath_reset(skb);
-       nf_reset(skb);
-       nf_reset_trace(skb);
        return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1702,7 +1723,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
                        skb_reset_mac_header(skb2);
 
                        if (skb_network_header(skb2) < skb2->data ||
-                           skb2->network_header > skb2->tail) {
+                           skb_network_header(skb2) > skb_tail_pointer(skb2)) {
                                net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
                                                     ntohs(skb2->protocol),
                                                     dev->name);
@@ -3065,6 +3086,46 @@ static int rps_ipi_queued(struct softnet_data *sd)
        return 0;
 }
 
+#ifdef CONFIG_NET_FLOW_LIMIT
+int netdev_flow_limit_table_len __read_mostly = (1 << 12);
+#endif
+
+static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
+{
+#ifdef CONFIG_NET_FLOW_LIMIT
+       struct sd_flow_limit *fl;
+       struct softnet_data *sd;
+       unsigned int old_flow, new_flow;
+
+       if (qlen < (netdev_max_backlog >> 1))
+               return false;
+
+       sd = &__get_cpu_var(softnet_data);
+
+       rcu_read_lock();
+       fl = rcu_dereference(sd->flow_limit);
+       if (fl) {
+               new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
+               old_flow = fl->history[fl->history_head];
+               fl->history[fl->history_head] = new_flow;
+
+               fl->history_head++;
+               fl->history_head &= FLOW_LIMIT_HISTORY - 1;
+
+               if (likely(fl->buckets[old_flow]))
+                       fl->buckets[old_flow]--;
+
+               if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
+                       fl->count++;
+                       rcu_read_unlock();
+                       return true;
+               }
+       }
+       rcu_read_unlock();
+#endif
+       return false;
+}
+
 /*
  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  * queue (may be a remote CPU queue).
@@ -3074,13 +3135,15 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 {
        struct softnet_data *sd;
        unsigned long flags;
+       unsigned int qlen;
 
        sd = &per_cpu(softnet_data, cpu);
 
        local_irq_save(flags);
 
        rps_lock(sd);
-       if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
+       qlen = skb_queue_len(&sd->input_pkt_queue);
+       if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
                if (skb_queue_len(&sd->input_pkt_queue)) {
 enqueue:
                        __skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -3828,7 +3891,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;
 
-       if (skb->mac_header == skb->tail &&
+       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
            pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
@@ -4072,6 +4135,58 @@ void napi_complete(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_complete);
 
+/* must be called under rcu_read_lock(), as we dont take a reference */
+struct napi_struct *napi_by_id(unsigned int napi_id)
+{
+       unsigned int hash = napi_id % HASH_SIZE(napi_hash);
+       struct napi_struct *napi;
+
+       hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
+               if (napi->napi_id == napi_id)
+                       return napi;
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(napi_by_id);
+
+void napi_hash_add(struct napi_struct *napi)
+{
+       if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+
+               spin_lock(&napi_hash_lock);
+
+               /* 0 is not a valid id, we also skip an id that is taken
+                * we expect both events to be extremely rare
+                */
+               napi->napi_id = 0;
+               while (!napi->napi_id) {
+                       napi->napi_id = ++napi_gen_id;
+                       if (napi_by_id(napi->napi_id))
+                               napi->napi_id = 0;
+               }
+
+               hlist_add_head_rcu(&napi->napi_hash_node,
+                       &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+
+               spin_unlock(&napi_hash_lock);
+       }
+}
+EXPORT_SYMBOL_GPL(napi_hash_add);
+
+/* Warning : caller is responsible to make sure rcu grace period
+ * is respected before freeing memory containing @napi
+ */
+void napi_hash_del(struct napi_struct *napi)
+{
+       spin_lock(&napi_hash_lock);
+
+       if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
+               hlist_del_rcu(&napi->napi_hash_node);
+
+       spin_unlock(&napi_hash_lock);
+}
+EXPORT_SYMBOL_GPL(napi_hash_del);
+
 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                    int (*poll)(struct napi_struct *, int), int weight)
 {
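Editor's note: the new napi_hash_add()/napi_by_id() pair lets busy-poll code look up a NAPI context from the id carried in an skb. A hedged sketch of how a driver that wants to be reachable this way would publish its NAPI instance; the driver function name is hypothetical, and napi_hash_del() must still be followed by an RCU grace period before the structure is freed, per the comment above:

#include <linux/netdevice.h>

static void example_enable_rx_napi(struct net_device *dev,
                                   struct napi_struct *napi,
                                   int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, poll, 64);	/* usual NAPI weight */
	napi_hash_add(napi);	/* assigns napi->napi_id, findable via napi_by_id() */
}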
@@ -4370,7 +4485,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        else
                list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
        dev_hold(upper_dev);
-
+       call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
        return 0;
 }
 
@@ -4430,6 +4545,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
        list_del_rcu(&upper->list);
        dev_put(upper_dev);
        kfree_rcu(upper, rcu);
+       call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
@@ -4700,8 +4816,13 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
        }
 
        if (dev->flags & IFF_UP &&
-           (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
-               call_netdevice_notifiers(NETDEV_CHANGE, dev);
+           (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
+               struct netdev_notifier_change_info change_info;
+
+               change_info.flags_changed = changes;
+               call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+                                             &change_info.info);
+       }
 }
 
 /**
@@ -5124,17 +5245,28 @@ static void netdev_init_one_queue(struct net_device *dev,
 #endif
 }
 
+static void netif_free_tx_queues(struct net_device *dev)
+{
+       if (is_vmalloc_addr(dev->_tx))
+               vfree(dev->_tx);
+       else
+               kfree(dev->_tx);
+}
+
 static int netif_alloc_netdev_queues(struct net_device *dev)
 {
        unsigned int count = dev->num_tx_queues;
        struct netdev_queue *tx;
+       size_t sz = count * sizeof(*tx);
 
-       BUG_ON(count < 1);
-
-       tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
-       if (!tx)
-               return -ENOMEM;
+       BUG_ON(count < 1 || count > 0xffff);
 
+       tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!tx) {
+               tx = vzalloc(sz);
+               if (!tx)
+                       return -ENOMEM;
+       }
        dev->_tx = tx;
 
        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
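Editor's note: the tx-queue array now uses a "try kmalloc, fall back to vmalloc" allocation, which is why the matching netif_free_tx_queues() checks is_vmalloc_addr(). A hedged, generic sketch of the same pattern (helper names hypothetical):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *example_alloc_array(size_t sz)
{
	/* suppress the failure splat; a large contiguous order may simply not exist */
	void *p = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!p)
		p = vzalloc(sz);	/* virtually contiguous fallback */
	return p;
}

static void example_free_array(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}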
@@ -5235,6 +5367,10 @@ int register_netdevice(struct net_device *dev)
         */
        dev->hw_enc_features |= NETIF_F_SG;
 
+       /* Make NETIF_F_SG inheritable to MPLS.
+        */
+       dev->mpls_features |= NETIF_F_SG;
+
        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
@@ -5678,7 +5814,7 @@ free_all:
 
 free_pcpu:
        free_percpu(dev->pcpu_refcnt);
-       kfree(dev->_tx);
+       netif_free_tx_queues(dev);
 #ifdef CONFIG_RPS
        kfree(dev->_rx);
 #endif
@@ -5703,7 +5839,7 @@ void free_netdev(struct net_device *dev)
 
        release_net(dev_net(dev));
 
-       kfree(dev->_tx);
+       netif_free_tx_queues(dev);
 #ifdef CONFIG_RPS
        kfree(dev->_rx);
 #endif
@@ -6014,7 +6150,7 @@ netdev_features_t netdev_increment_features(netdev_features_t all,
 }
 EXPORT_SYMBOL(netdev_increment_features);
 
-static struct hlist_head *netdev_create_hash(void)
+static struct hlist_head * __net_init netdev_create_hash(void)
 {
        int i;
        struct hlist_head *hash;
@@ -6270,6 +6406,10 @@ static int __init net_dev_init(void)
                sd->backlog.weight = weight_p;
                sd->backlog.gro_list = NULL;
                sd->backlog.gro_count = 0;
+
+#ifdef CONFIG_NET_FLOW_LIMIT
+               sd->flow_limit = NULL;
+#endif
        }
 
        dev_boot_phase = 0;
index c013f38482a1ef6572a9de27a2aa39946971b40b..6cda4e2c2132e345f82e3cd0feaeaaa16fbd75e6 100644 (file)
@@ -39,6 +39,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
        ha->refcount = 1;
        ha->global_use = global;
        ha->synced = sync;
+       ha->sync_cnt = 0;
        list_add_tail_rcu(&ha->list, &list->list);
        list->count++;
 
@@ -66,7 +67,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
                        }
                        if (sync) {
                                if (ha->synced)
-                                       return 0;
+                                       return -EEXIST;
                                else
                                        ha->synced = true;
                        }
@@ -139,10 +140,13 @@ static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
 
        err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
                               false, true);
-       if (err)
+       if (err && err != -EEXIST)
                return err;
-       ha->sync_cnt++;
-       ha->refcount++;
+
+       if (!err) {
+               ha->sync_cnt++;
+               ha->refcount++;
+       }
 
        return 0;
 }
@@ -159,7 +163,8 @@ static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
        if (err)
                return;
        ha->sync_cnt--;
-       __hw_addr_del_entry(from_list, ha, false, true);
+       /* address on from list is not marked synced */
+       __hw_addr_del_entry(from_list, ha, false, false);
 }
 
 static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
@@ -796,7 +801,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
                return -EINVAL;
 
        netif_addr_lock_nested(to);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+       err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock(to);
index d23b6682f4e95cfd029cd19db31252184ec03d2d..5e78d44333b9bc39fdb56fdd489d0ac4f7f5d4e8 100644 (file)
@@ -295,9 +295,9 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
 }
 
 static int dropmon_net_event(struct notifier_block *ev_block,
-                       unsigned long event, void *ptr)
+                            unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dm_hw_stat_delta *new_stat = NULL;
        struct dm_hw_stat_delta *tmp;
 
index df9cc810ec8e3a78bbdb3b4480deeb8fd35df721..ca4231ec734787be93c9ebc1e6c69d4a30bf24a5 100644 (file)
@@ -372,7 +372,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 static int dst_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dst_entry *dst, *last = NULL;
 
        switch (event) {
index 22efdaa76ebf9909db7a69d59d54de191fd00d69..9255bbdf81ff7dbe1e86f083bae8e67479e0d8c0 100644 (file)
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
        [NETIF_F_HIGHDMA_BIT] =          "highdma",
        [NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
-       [NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-ctag-hw-insert",
+       [NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-hw-insert",
 
-       [NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-ctag-hw-parse",
-       [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter",
+       [NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-hw-parse",
+       [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
        [NETIF_F_HW_VLAN_STAG_TX_BIT] =  "tx-vlan-stag-hw-insert",
        [NETIF_F_HW_VLAN_STAG_RX_BIT] =  "rx-vlan-stag-hw-parse",
        [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
@@ -82,6 +82,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
        [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
+       [NETIF_F_GSO_MPLS_BIT] =         "tx-mpls-segmentation",
 
        [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
        [NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",
@@ -1413,7 +1414,7 @@ static int ethtool_get_module_eeprom(struct net_device *dev,
                                      modinfo.eeprom_len);
 }
 
-/* The main entry point in this file.  Called from net/core/dev.c */
+/* The main entry point in this file.  Called from net/core/dev_ioctl.c */
 
 int dev_ethtool(struct net *net, struct ifreq *ifr)
 {
index d5a9f8ead0d864305110f2ca6f1dc70fca5b84cd..21735440c44a85b611308e6b3c97bef77452c9a1 100644 (file)
@@ -705,9 +705,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
 
 
 static int fib_rules_event(struct notifier_block *this, unsigned long event,
-                           void *ptr)
+                          void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct fib_rules_ops *ops;
 
index dad2a178f9f8a477488f091962c5e771d1d117b3..6438f29ff26650b240be40d7d80953dc28f13cb0 100644 (file)
@@ -778,7 +778,7 @@ int sk_detach_filter(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(sk_detach_filter);
 
-static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
 {
        static const u16 decodes[] = {
                [BPF_S_ALU_ADD_K]       = BPF_ALU|BPF_ADD|BPF_K,
index d9d198aa9fed0738288973815b427ec6bae0de4a..6b5b6e7013cafec8a7b3767ed97a610bf35f1ae6 100644 (file)
@@ -82,7 +82,7 @@ struct gen_estimator
 {
        struct list_head        list;
        struct gnet_stats_basic_packed  *bstats;
-       struct gnet_stats_rate_est      *rate_est;
+       struct gnet_stats_rate_est64    *rate_est;
        spinlock_t              *stats_lock;
        int                     ewma_log;
        u64                     last_bytes;
@@ -167,7 +167,7 @@ static void gen_add_node(struct gen_estimator *est)
 
 static
 struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
-                                   const struct gnet_stats_rate_est *rate_est)
+                                   const struct gnet_stats_rate_est64 *rate_est)
 {
        struct rb_node *p = est_root.rb_node;
 
@@ -203,7 +203,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                     struct gnet_stats_rate_est *rate_est,
+                     struct gnet_stats_rate_est64 *rate_est,
                      spinlock_t *stats_lock,
                      struct nlattr *opt)
 {
@@ -258,7 +258,7 @@ EXPORT_SYMBOL(gen_new_estimator);
  * Note : Caller should respect an RCU grace period before freeing stats_lock
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
-                       struct gnet_stats_rate_est *rate_est)
+                       struct gnet_stats_rate_est64 *rate_est)
 {
        struct gen_estimator *e;
 
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
  * Returns 0 on success or a negative error code.
  */
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                         struct gnet_stats_rate_est *rate_est,
+                         struct gnet_stats_rate_est64 *rate_est,
                          spinlock_t *stats_lock, struct nlattr *opt)
 {
        gen_kill_estimator(bstats, rate_est);
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(gen_replace_estimator);
  * Returns true if estimator is active, and false if not.
  */
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
-                         const struct gnet_stats_rate_est *rate_est)
+                         const struct gnet_stats_rate_est64 *rate_est)
 {
        bool res;
 
index ddedf211e588146f17eb2e147307d04991720170..9d3d9e78397b0f90f379addab25c7fd78294b3bc 100644 (file)
@@ -143,18 +143,30 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
 int
 gnet_stats_copy_rate_est(struct gnet_dump *d,
                         const struct gnet_stats_basic_packed *b,
-                        struct gnet_stats_rate_est *r)
+                        struct gnet_stats_rate_est64 *r)
 {
+       struct gnet_stats_rate_est est;
+       int res;
+
        if (b && !gen_estimator_active(b, r))
                return 0;
 
+       est.bps = min_t(u64, UINT_MAX, r->bps);
+       /* we have some time before reaching 2^32 packets per second */
+       est.pps = r->pps;
+
        if (d->compat_tc_stats) {
-               d->tc_stats.bps = r->bps;
-               d->tc_stats.pps = r->pps;
+               d->tc_stats.bps = est.bps;
+               d->tc_stats.pps = est.pps;
        }
 
-       if (d->tail)
-               return gnet_stats_copy(d, TCA_STATS_RATE_EST, r, sizeof(*r));
+       if (d->tail) {
+               res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est));
+               if (res < 0 || est.bps == r->bps)
+                       return res;
+               /* emit 64bit stats only if needed */
+               return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r));
+       }
 
        return 0;
 }
index 7e7aeb01de45cf3236469d7f62b51254b73abac4..de178e462682af6c97dbfc4326df85942ec25174 100644 (file)
@@ -73,31 +73,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
        return err;
 }
 
-/*
- *     Copy kernel to iovec. Returns -EFAULT on error.
- *
- *     Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
-       while (len > 0) {
-               if (iov->iov_len) {
-                       int copy = min_t(unsigned int, iov->iov_len, len);
-                       if (copy_to_user(iov->iov_base, kdata, copy))
-                               return -EFAULT;
-                       kdata += copy;
-                       len -= copy;
-                       iov->iov_len -= copy;
-                       iov->iov_base += copy;
-               }
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
 /*
  *     Copy kernel to iovec. Returns -EFAULT on error.
  */
@@ -124,31 +99,6 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 }
 EXPORT_SYMBOL(memcpy_toiovecend);
 
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- *
- *     Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
-       while (len > 0) {
-               if (iov->iov_len) {
-                       int copy = min_t(unsigned int, len, iov->iov_len);
-                       if (copy_from_user(kdata, iov->iov_base, copy))
-                               return -EFAULT;
-                       len -= copy;
-                       kdata += copy;
-                       iov->iov_base += copy;
-                       iov->iov_len -= copy;
-               }
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
 /*
  *     Copy iovec from kernel. Returns -EFAULT on error.
  */
index 8f82a5cc3851d3a61953a6cad926c9040bc770a7..9c3a839322baccbf69079747a4588079ce46d029 100644 (file)
@@ -92,6 +92,9 @@ static bool linkwatch_urgent_event(struct net_device *dev)
        if (dev->ifindex != dev->iflink)
                return true;
 
+       if (dev->priv_flags & IFF_TEAM_PORT)
+               return true;
+
        return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
 }
 
index 5c56b217b999d114ea7954a9e6060a77d906ceff..2569ab2cafbe0e557b65a68166d932118a28ffc9 100644 (file)
@@ -1419,7 +1419,7 @@ static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
 
        for (p = &tbl->parms; p; p = p->next) {
                if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
-                   (!p->dev && !ifindex))
+                   (!p->dev && !ifindex && net_eq(net, &init_net)))
                        return p;
        }
 
@@ -1429,15 +1429,11 @@ static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
 {
-       struct neigh_parms *p, *ref;
+       struct neigh_parms *p;
        struct net *net = dev_net(dev);
        const struct net_device_ops *ops = dev->netdev_ops;
 
-       ref = lookup_neigh_parms(tbl, net, 0);
-       if (!ref)
-               return NULL;
-
-       p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
+       p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
        if (p) {
                p->tbl            = tbl;
                atomic_set(&p->refcnt, 1);
@@ -2053,6 +2049,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
        }
 
+       err = -ENOENT;
+       if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
+            tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
+           !net_eq(net, &init_net))
+               goto errout_tbl_lock;
+
        if (tb[NDTA_THRESH1])
                tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
 
@@ -2765,11 +2767,11 @@ EXPORT_SYMBOL(neigh_app_ns);
 static int zero;
 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
 
-static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
-                          size_t *lenp, loff_t *ppos)
+static int proc_unres_qlen(struct ctl_table *ctl, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int size, ret;
-       ctl_table tmp = *ctl;
+       struct ctl_table tmp = *ctl;
 
        tmp.extra1 = &zero;
        tmp.extra2 = &unres_qlen_max;
index 569d355fec3e8adac815c1819e43abbac98fcf39..2bf83299600a4ac8eb56069295bbc755709642b1 100644 (file)
@@ -146,11 +146,23 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
 static int softnet_seq_show(struct seq_file *seq, void *v)
 {
        struct softnet_data *sd = v;
+       unsigned int flow_limit_count = 0;
 
-       seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+#ifdef CONFIG_NET_FLOW_LIMIT
+       struct sd_flow_limit *fl;
+
+       rcu_read_lock();
+       fl = rcu_dereference(sd->flow_limit);
+       if (fl)
+               flow_limit_count = fl->count;
+       rcu_read_unlock();
+#endif
+
+       seq_printf(seq,
+                  "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
                   sd->processed, sd->dropped, sd->time_squeeze, 0,
                   0, 0, 0, 0, /* was fastroute */
-                  sd->cpu_collision, sd->received_rps);
+                  sd->cpu_collision, sd->received_rps, flow_limit_count);
        return 0;
 }
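Editor's note: with the extra field, each /proc/net/softnet_stat line now carries eleven hex words, the last being the flow-limit hit count for that CPU. A hedged userspace sketch that reads the new column; it assumes a kernel carrying this patch, since older kernels print only ten words per line:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/softnet_stat", "r");
	unsigned int v[11];
	int cpu = 0;

	if (!f)
		return 1;
	/* column order follows the seq_printf() above */
	while (fscanf(f, "%x %x %x %x %x %x %x %x %x %x %x",
		      &v[0], &v[1], &v[2], &v[3], &v[4], &v[5],
		      &v[6], &v[7], &v[8], &v[9], &v[10]) == 11) {
		printf("cpu%d: processed=%u dropped=%u flow_limit_count=%u\n",
		       cpu++, v[0], v[1], v[10]);
	}
	fclose(f);
	return 0;
}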
 
index cec074be8c4378b56a01889165d55cc282808208..03c8ec3edc7208715f76ab06c68154de30d089af 100644 (file)
@@ -247,7 +247,7 @@ static void netpoll_poll_dev(struct net_device *dev)
        zap_completion_queue();
 }
 
-int netpoll_rx_disable(struct net_device *dev)
+void netpoll_rx_disable(struct net_device *dev)
 {
        struct netpoll_info *ni;
        int idx;
@@ -257,7 +257,6 @@ int netpoll_rx_disable(struct net_device *dev)
        if (ni)
                down(&ni->dev_lock);
        srcu_read_unlock(&netpoll_srcu, idx);
-       return 0;
 }
 EXPORT_SYMBOL(netpoll_rx_disable);
 
@@ -690,25 +689,20 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
                        send_skb->dev = skb->dev;
 
                        skb_reset_network_header(send_skb);
-                       skb_put(send_skb, sizeof(struct ipv6hdr));
-                       hdr = ipv6_hdr(send_skb);
-
+                       hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
                        *(__be32*)hdr = htonl(0x60000000);
-
                        hdr->payload_len = htons(size);
                        hdr->nexthdr = IPPROTO_ICMPV6;
                        hdr->hop_limit = 255;
                        hdr->saddr = *saddr;
                        hdr->daddr = *daddr;
 
-                       send_skb->transport_header = send_skb->tail;
-                       skb_put(send_skb, size);
-
-                       icmp6h = (struct icmp6hdr *)skb_transport_header(skb);
+                       icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
                        icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
                        icmp6h->icmp6_router = 0;
                        icmp6h->icmp6_solicited = 1;
-                       target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
+
+                       target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
                        *target = msg->target;
                        icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
                                                              IPPROTO_ICMPV6,
index 0777d0aa18c3814a42a338a25d3508d9654c5797..e533259dce3ccad8a04cc095fa0056b8693fde70 100644 (file)
@@ -261,7 +261,7 @@ struct cgroup_subsys net_prio_subsys = {
 static int netprio_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netprio_map *old;
 
        /*
index 11f2704c3810d54b3539daddea36d50ef230636f..9640972ec50e5658eac7493fb0da180a0de353c9 100644 (file)
@@ -1921,7 +1921,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 static int pktgen_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);
 
        if (pn->pktgen_exiting)
@@ -2627,6 +2627,29 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
        pgh->tv_usec = htonl(timestamp.tv_usec);
 }
 
+static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
+                                       struct pktgen_dev *pkt_dev,
+                                       unsigned int extralen)
+{
+       struct sk_buff *skb = NULL;
+       unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
+                           pkt_dev->pkt_overhead;
+
+       if (pkt_dev->flags & F_NODE) {
+               int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
+
+               skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
+               if (likely(skb)) {
+                       skb_reserve(skb, NET_SKB_PAD);
+                       skb->dev = dev;
+               }
+       } else {
+                skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
+       }
+
+       return skb;
+}
+
 static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                                        struct pktgen_dev *pkt_dev)
 {
@@ -2657,32 +2680,13 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 
        datalen = (odev->hard_header_len + 16) & ~0xf;
 
-       if (pkt_dev->flags & F_NODE) {
-               int node;
-
-               if (pkt_dev->node >= 0)
-                       node = pkt_dev->node;
-               else
-                       node =  numa_node_id();
-
-               skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
-                                 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
-               if (likely(skb)) {
-                       skb_reserve(skb, NET_SKB_PAD);
-                       skb->dev = odev;
-               }
-       }
-       else
-         skb = __netdev_alloc_skb(odev,
-                                  pkt_dev->cur_pkt_size + 64
-                                  + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
-
+       skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
        if (!skb) {
                sprintf(pkt_dev->result, "No memory");
                return NULL;
        }
-       prefetchw(skb->data);
 
+       prefetchw(skb->data);
        skb_reserve(skb, datalen);
 
        /*  Reserve for ethernet and IP header  */
@@ -2708,15 +2712,15 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                *vlan_encapsulated_proto = htons(ETH_P_IP);
        }
 
-       skb->network_header = skb->tail;
-       skb->transport_header = skb->network_header + sizeof(struct iphdr);
-       skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
+       skb_set_mac_header(skb, 0);
+       skb_set_network_header(skb, skb->len);
+       iph = (struct iphdr *) skb_put(skb, sizeof(struct iphdr));
+
+       skb_set_transport_header(skb, skb->len);
+       udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
        skb_set_queue_mapping(skb, queue_map);
        skb->priority = pkt_dev->skb_priority;
 
-       iph = ip_hdr(skb);
-       udph = udp_hdr(skb);
-
        memcpy(eth, pkt_dev->hh, 12);
        *(__be16 *) & eth[12] = protocol;
 
@@ -2746,8 +2750,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        iph->check = 0;
        iph->check = ip_fast_csum((void *)iph, iph->ihl);
        skb->protocol = protocol;
-       skb->mac_header = (skb->network_header - ETH_HLEN -
-                          pkt_dev->pkt_overhead);
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
        pktgen_finalize_skb(pkt_dev, skb, datalen);
@@ -2788,15 +2790,13 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        mod_cur_headers(pkt_dev);
        queue_map = pkt_dev->cur_queue_map;
 
-       skb = __netdev_alloc_skb(odev,
-                                pkt_dev->cur_pkt_size + 64
-                                + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
+       skb = pktgen_alloc_skb(odev, pkt_dev, 16);
        if (!skb) {
                sprintf(pkt_dev->result, "No memory");
                return NULL;
        }
-       prefetchw(skb->data);
 
+       prefetchw(skb->data);
        skb_reserve(skb, 16);
 
        /*  Reserve for ethernet and IP header  */
@@ -2822,13 +2822,14 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
                *vlan_encapsulated_proto = htons(ETH_P_IPV6);
        }
 
-       skb->network_header = skb->tail;
-       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-       skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
+       skb_set_mac_header(skb, 0);
+       skb_set_network_header(skb, skb->len);
+       iph = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
+
+       skb_set_transport_header(skb, skb->len);
+       udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
        skb_set_queue_mapping(skb, queue_map);
        skb->priority = pkt_dev->skb_priority;
-       iph = ipv6_hdr(skb);
-       udph = udp_hdr(skb);
 
        memcpy(eth, pkt_dev->hh, 12);
        *(__be16 *) &eth[12] = protocol;
@@ -2863,8 +2864,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        iph->daddr = pkt_dev->cur_in6_daddr;
        iph->saddr = pkt_dev->cur_in6_saddr;
 
-       skb->mac_header = (skb->network_header - ETH_HLEN -
-                          pkt_dev->pkt_overhead);
        skb->protocol = protocol;
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
index a08bd2b7fe3f06901d6c75e82df5e51e1aa2234f..9007533867f0e99cc27cf3c19cc0539de558a8b6 100644 (file)
@@ -947,6 +947,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        struct ifla_vf_vlan vf_vlan;
                        struct ifla_vf_tx_rate vf_tx_rate;
                        struct ifla_vf_spoofchk vf_spoofchk;
+                       struct ifla_vf_link_state vf_linkstate;
 
                        /*
                         * Not all SR-IOV capable drivers support the
@@ -956,18 +957,24 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                         */
                        ivi.spoofchk = -1;
                        memset(ivi.mac, 0, sizeof(ivi.mac));
+                       /* The default value for VF link state is "auto"
+                        * IFLA_VF_LINK_STATE_AUTO which equals zero
+                        */
+                       ivi.linkstate = 0;
                        if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
                                break;
                        vf_mac.vf =
                                vf_vlan.vf =
                                vf_tx_rate.vf =
-                               vf_spoofchk.vf = ivi.vf;
+                               vf_spoofchk.vf =
+                               vf_linkstate.vf = ivi.vf;
 
                        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
                        vf_vlan.vlan = ivi.vlan;
                        vf_vlan.qos = ivi.qos;
                        vf_tx_rate.rate = ivi.tx_rate;
                        vf_spoofchk.setting = ivi.spoofchk;
+                       vf_linkstate.link_state = ivi.linkstate;
                        vf = nla_nest_start(skb, IFLA_VF_INFO);
                        if (!vf) {
                                nla_nest_cancel(skb, vfinfo);
@@ -978,7 +985,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                            nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
                                    &vf_tx_rate) ||
                            nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
-                                   &vf_spoofchk))
+                                   &vf_spoofchk) ||
+                           nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
+                                   &vf_linkstate))
                                goto nla_put_failure;
                        nla_nest_end(skb, vf);
                }
@@ -1238,6 +1247,15 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
                                                               ivs->setting);
                        break;
                }
+               case IFLA_VF_LINK_STATE: {
+                       struct ifla_vf_link_state *ivl;
+                       ivl = nla_data(vf);
+                       err = -EOPNOTSUPP;
+                       if (ops->ndo_set_vf_link_state)
+                               err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+                                                                ivl->link_state);
+                       break;
+               }
                default:
                        err = -EINVAL;
                        break;
@@ -2667,7 +2685,7 @@ static void rtnetlink_rcv(struct sk_buff *skb)
 
 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        switch (event) {
        case NETDEV_UP:
index af9185d0be6a9e3b3b81c10d061b82992c40fc1d..b1fcb8727e5640b7c3f19e84d03d3aaa02f1f583 100644 (file)
@@ -195,13 +195,11 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
-       skb->data = NULL;
+       skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
 
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-       skb->mac_header = ~0U;
-#endif
+       skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
        return skb;
 }
@@ -275,10 +273,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-       skb->mac_header = ~0U;
-       skb->transport_header = ~0U;
-#endif
+       skb->mac_header = (typeof(skb->mac_header))~0U;
+       skb->transport_header = (typeof(skb->transport_header))~0U;
 
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
@@ -344,10 +340,8 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-       skb->mac_header = ~0U;
-       skb->transport_header = ~0U;
-#endif
+       skb->mac_header = (typeof(skb->mac_header))~0U;
+       skb->transport_header = (typeof(skb->transport_header))~0U;
 
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
@@ -611,7 +605,7 @@ static void skb_release_head_state(struct sk_buff *skb)
 static void skb_release_all(struct sk_buff *skb)
 {
        skb_release_head_state(skb);
-       if (likely(skb->data))
+       if (likely(skb->head))
                skb_release_data(skb);
 }
 
@@ -739,6 +733,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->vlan_tci           = old->vlan_tci;
 
        skb_copy_secmark(new, old);
+
+#ifdef CONFIG_NET_LL_RX_POLL
+       new->napi_id    = old->napi_id;
+#endif
 }
 
 /*
@@ -911,18 +909,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
-       /*
-        *      Shift between the two data areas in bytes
-        */
-       unsigned long offset = new->data - old->data;
-#endif
-
        __copy_skb_header(new, old);
 
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
-       skb_headers_offset_update(new, offset);
-#endif
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
@@ -1114,7 +1102,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        skb->end      = skb->head + size;
 #endif
        skb->tail             += off;
-       skb_headers_offset_update(skb, off);
+       skb_headers_offset_update(skb, nhead);
        /* Only adjust this if it actually is csum_start rather than csum */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start += nhead;
@@ -1209,9 +1197,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        off                  = newheadroom - oldheadroom;
        if (n->ip_summed == CHECKSUM_PARTIAL)
                n->csum_start += off;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
+
        skb_headers_offset_update(n, off);
-#endif
 
        return n;
 }
@@ -2554,8 +2541,13 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
        skb_frag_t *frag;
 
-       if (unlikely(abs_offset >= st->upper_offset))
+       if (unlikely(abs_offset >= st->upper_offset)) {
+               if (st->frag_data) {
+                       kunmap_atomic(st->frag_data);
+                       st->frag_data = NULL;
+               }
                return 0;
+       }
 
 next_skb:
        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
@@ -2853,7 +2845,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                                                 doffset + tnl_hlen);
 
                if (fskb != skb_shinfo(skb)->frag_list)
-                       continue;
+                       goto perform_csum_check;
 
                if (!sg) {
                        nskb->ip_summed = CHECKSUM_NONE;
@@ -2917,6 +2909,7 @@ skip_fraglist:
                nskb->len += nskb->data_len;
                nskb->truesize += nskb->data_len;
 
+perform_csum_check:
                if (!csum) {
                        nskb->csum = skb_checksum(nskb, doffset,
                                                  nskb->len - doffset, 0);
@@ -3499,3 +3492,26 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
        return true;
 }
 EXPORT_SYMBOL(skb_try_coalesce);
+
+/**
+ * skb_scrub_packet - scrub an skb before sending it to another netns
+ *
+ * @skb: buffer to clean
+ *
+ * skb_scrub_packet can be used to clean an skb before injecting it into
+ * another namespace. We have to clear all information in the skb that
+ * could impact namespace isolation.
+ */
+void skb_scrub_packet(struct sk_buff *skb)
+{
+       skb_orphan(skb);
+       skb->tstamp.tv64 = 0;
+       skb->pkt_type = PACKET_HOST;
+       skb->skb_iif = 0;
+       skb_dst_drop(skb);
+       skb->mark = 0;
+       secpath_reset(skb);
+       nf_reset(skb);
+       nf_reset_trace(skb);
+}
+EXPORT_SYMBOL_GPL(skb_scrub_packet);
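A hypothetical caller, to show the intended use (dst_dev stands in for a device living in another netns; this helper is not part of the patch):

static void deliver_across_netns(struct sk_buff *skb, struct net_device *dst_dev)
{
	/* dst_dev is assumed to sit in a different network namespace. */
	skb_scrub_packet(skb);	/* orphan + clear dst, mark, conntrack, secpath, tstamp */
	skb->dev = dst_dev;
	netif_rx(skb);
}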
index d4f4cea726e7bfdb0d9cc47a779d5252738bf37c..b6c619f4d47b79f9386225fd9ade718b9f7e1e58 100644 (file)
 #include <net/tcp.h>
 #endif
 
+#include <net/ll_poll.h>
+
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
@@ -210,7 +212,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -226,7 +228,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -242,7 +244,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
 };
 
 /*
@@ -911,6 +913,19 @@ set_rcvbuf:
                sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
                break;
 
+#ifdef CONFIG_NET_LL_RX_POLL
+       case SO_LL:
+               /* allow unprivileged users to decrease the value */
+               if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
+                       ret = -EPERM;
+               else {
+                       if (val < 0)
+                               ret = -EINVAL;
+                       else
+                               sk->sk_ll_usec = val;
+               }
+               break;
+#endif
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -1168,6 +1183,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
                break;
 
+#ifdef CONFIG_NET_LL_RX_POLL
+       case SO_LL:
+               v.val = sk->sk_ll_usec;
+               break;
+#endif
+
        default:
                return -ENOPROTOOPT;
        }
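From userspace the new option is set per socket. A sketch follows; the SO_LL value is an assumption taken from this patch series' uapi headers (later kernels renamed the option SO_BUSY_POLL):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_LL
#define SO_LL 46	/* assumed value from this series; check your uapi headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	unsigned int usecs = 50;	/* busy-poll budget per receive, in microseconds */

	/* Raising the value above sk_ll_usec needs CAP_NET_ADMIN; lowering it does not. */
	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_LL, &usecs, sizeof(usecs)) < 0)
		perror("SO_LL");
	return 0;
}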
@@ -1217,18 +1238,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 }
 
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
-       if (offsetof(struct sock, sk_node.next) != 0)
-               memset(sk, 0, offsetof(struct sock, sk_node.next));
-       memset(&sk->sk_node.pprev, 0,
-              size - offsetof(struct sock, sk_node.pprev));
-}
-
 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
 {
        unsigned long nulls1, nulls2;
@@ -2296,6 +2305,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        sk->sk_stamp = ktime_set(-1L, 0);
 
+#ifdef CONFIG_NET_LL_RX_POLL
+       sk->sk_napi_id          =       0;
+       sk->sk_ll_usec          =       sysctl_net_ll_read;
+#endif
+
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
index d5bef0b0f63968bdfc906921bcc99522154750cc..a0e9cf6379de3eac8ac1182c3be73e8d140b4a1b 100644 (file)
@@ -73,8 +73,13 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
                goto out;
        }
 
-       if (filter)
-               memcpy(nla_data(attr), filter->insns, len);
+       if (filter) {
+               struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
+               int i;
+
+               for (i = 0; i < filter->len; i++, fb++)
+                       sk_decode_filter(&filter->insns[i], fb);
+       }
 
 out:
        rcu_read_unlock();
index cfdb46ab3a7f866dd77957abc2f202d673c99bd5..afc677eadd9323bd5673c1ae5f0aacbb07f15d6c 100644 (file)
 #include <net/ip.h>
 #include <net/sock.h>
 #include <net/net_ratelimit.h>
+#include <net/ll_poll.h>
 
 static int one = 1;
 
 #ifdef CONFIG_RPS
-static int rps_sock_flow_sysctl(ctl_table *table, int write,
+static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        unsigned int orig_size, size;
        int ret, i;
-       ctl_table tmp = {
+       struct ctl_table tmp = {
                .data = &size,
                .maxlen = sizeof(size),
                .mode = table->mode
@@ -87,6 +88,109 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 }
 #endif /* CONFIG_RPS */
 
+#ifdef CONFIG_NET_FLOW_LIMIT
+static DEFINE_MUTEX(flow_limit_update_mutex);
+
+static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp,
+                                loff_t *ppos)
+{
+       struct sd_flow_limit *cur;
+       struct softnet_data *sd;
+       cpumask_var_t mask;
+       int i, len, ret = 0;
+
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       if (write) {
+               ret = cpumask_parse_user(buffer, *lenp, mask);
+               if (ret)
+                       goto done;
+
+               mutex_lock(&flow_limit_update_mutex);
+               len = sizeof(*cur) + netdev_flow_limit_table_len;
+               for_each_possible_cpu(i) {
+                       sd = &per_cpu(softnet_data, i);
+                       cur = rcu_dereference_protected(sd->flow_limit,
+                                    lockdep_is_held(&flow_limit_update_mutex));
+                       if (cur && !cpumask_test_cpu(i, mask)) {
+                               RCU_INIT_POINTER(sd->flow_limit, NULL);
+                               synchronize_rcu();
+                               kfree(cur);
+                       } else if (!cur && cpumask_test_cpu(i, mask)) {
+                               cur = kzalloc(len, GFP_KERNEL);
+                               if (!cur) {
+                                       /* not unwinding previous changes */
+                                       ret = -ENOMEM;
+                                       goto write_unlock;
+                               }
+                               cur->num_buckets = netdev_flow_limit_table_len;
+                               rcu_assign_pointer(sd->flow_limit, cur);
+                       }
+               }
+write_unlock:
+               mutex_unlock(&flow_limit_update_mutex);
+       } else {
+               char kbuf[128];
+
+               if (*ppos || !*lenp) {
+                       *lenp = 0;
+                       goto done;
+               }
+
+               cpumask_clear(mask);
+               rcu_read_lock();
+               for_each_possible_cpu(i) {
+                       sd = &per_cpu(softnet_data, i);
+                       if (rcu_dereference(sd->flow_limit))
+                               cpumask_set_cpu(i, mask);
+               }
+               rcu_read_unlock();
+
+               len = min(sizeof(kbuf) - 1, *lenp);
+               len = cpumask_scnprintf(kbuf, len, mask);
+               if (!len) {
+                       *lenp = 0;
+                       goto done;
+               }
+               if (len < *lenp)
+                       kbuf[len++] = '\n';
+               if (copy_to_user(buffer, kbuf, len)) {
+                       ret = -EFAULT;
+                       goto done;
+               }
+               *lenp = len;
+               *ppos += len;
+       }
+
+done:
+       free_cpumask_var(mask);
+       return ret;
+}
+
+static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
+                                      void __user *buffer, size_t *lenp,
+                                      loff_t *ppos)
+{
+       unsigned int old, *ptr;
+       int ret;
+
+       mutex_lock(&flow_limit_update_mutex);
+
+       ptr = table->data;
+       old = *ptr;
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (!ret && write && !is_power_of_2(*ptr)) {
+               *ptr = old;
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&flow_limit_update_mutex);
+       return ret;
+}
+#endif /* CONFIG_NET_FLOW_LIMIT */
+
 static struct ctl_table net_core_table[] = {
 #ifdef CONFIG_NET
        {
@@ -180,6 +284,37 @@ static struct ctl_table net_core_table[] = {
                .proc_handler   = rps_sock_flow_sysctl
        },
 #endif
+#ifdef CONFIG_NET_FLOW_LIMIT
+       {
+               .procname       = "flow_limit_cpu_bitmap",
+               .mode           = 0644,
+               .proc_handler   = flow_limit_cpu_sysctl
+       },
+       {
+               .procname       = "flow_limit_table_len",
+               .data           = &netdev_flow_limit_table_len,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = flow_limit_table_len_sysctl
+       },
+#endif /* CONFIG_NET_FLOW_LIMIT */
+#ifdef CONFIG_NET_LL_RX_POLL
+       {
+               .procname       = "low_latency_poll",
+               .data           = &sysctl_net_ll_poll,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "low_latency_read",
+               .data           = &sysctl_net_ll_read,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+#endif /* CONFIG_NET_LL_RX_POLL */
 #endif /* CONFIG_NET */
        {
                .procname       = "netdev_budget",
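The new knobs land under /proc/sys/net/core/. A small sketch of driving them from C; the paths follow the procnames above and the chosen values are only examples:

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Hex cpumask, as parsed by cpumask_parse_user(): CPUs 0-3. */
	write_sysctl("/proc/sys/net/core/flow_limit_cpu_bitmap", "f");
	/* Must be a power of two, or flow_limit_table_len_sysctl() rejects it. */
	write_sysctl("/proc/sys/net/core/flow_limit_table_len", "8192");
	/* Default busy-read budget in microseconds for new sockets. */
	write_sysctl("/proc/sys/net/core/low_latency_read", "50");
	return 0;
}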
index c21f200eed9342c6ab34ee665fd5cc72c34f9cd0..dd4d506ef92395a9124a73391dd69beb92ed0388 100644 (file)
@@ -2078,9 +2078,9 @@ out_err:
 }
 
 static int dn_device_event(struct notifier_block *this, unsigned long event,
-                       void *ptr)
+                          void *ptr)
 {
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
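The same conversion repeats across the notifiers touched below: callbacks now receive a struct netdev_notifier_info (or a subtype such as netdev_notifier_change_info for NETDEV_CHANGE) and must fetch the device with netdev_notifier_info_to_dev(). A sketch of the new convention using a made-up notifier:

static int demo_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGE) {
		struct netdev_notifier_change_info *info = ptr;

		if (info->flags_changed & IFF_NOARP)
			pr_info("%s: IFF_NOARP toggled\n", dev->name);
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};
/* registered elsewhere with register_netdevice_notifier(&demo_notifier) */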
index 7d9197063ebb98beee6c778b25c5e675ecedfb4b..dd0dfb25f4b1c6945ab07a5190e7393abb42a3cc 100644 (file)
@@ -158,11 +158,11 @@ static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MU
 static int min_priority[1];
 static int max_priority[] = { 127 }; /* From DECnet spec */
 
-static int dn_forwarding_proc(ctl_table *, int,
+static int dn_forwarding_proc(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);
 static struct dn_dev_sysctl_table {
        struct ctl_table_header *sysctl_header;
-       ctl_table dn_dev_vars[5];
+       struct ctl_table dn_dev_vars[5];
 } dn_dev_sysctl = {
        NULL,
        {
@@ -242,7 +242,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
        }
 }
 
-static int dn_forwarding_proc(ctl_table *table, int write,
+static int dn_forwarding_proc(struct ctl_table *table, int write,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
 {
index a55eeccaa72fd75474f9c6b1b5e0672b5833d4e5..5325b541c526d9d5e92b7481302dde6ce5e7d65c 100644 (file)
@@ -132,7 +132,7 @@ static int parse_addr(__le16 *addr, char *str)
        return 0;
 }
 
-static int dn_node_address_handler(ctl_table *table, int write,
+static int dn_node_address_handler(struct ctl_table *table, int write,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
 {
@@ -183,7 +183,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
        return 0;
 }
 
-static int dn_def_dev_handler(ctl_table *table, int write,
+static int dn_def_dev_handler(struct ctl_table *table, int write,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
 {
@@ -246,7 +246,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
        return 0;
 }
 
-static ctl_table dn_table[] = {
+static struct ctl_table dn_table[] = {
        {
                .procname = "node_address",
                .maxlen = 7,
index 55e1fd5b3e56d4ede22c2a05a6f72cc3b4bffa44..3b9d5f20bd1c695de768db960b190beb4370fa35 100644 (file)
@@ -1352,10 +1352,9 @@ static inline void lowpan_netlink_fini(void)
 }
 
 static int lowpan_device_event(struct notifier_block *unused,
-                               unsigned long event,
-                               void *ptr)
+                              unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        LIST_HEAD(del_list);
        struct lowpan_dev_record *entry, *tmp;
 
index 8603ca82710415b24e8ff85cbce1e45ba34491d2..37cf1a6ea3ad23662ea39d236aae62c0fdebed05 100644 (file)
@@ -9,10 +9,7 @@ config IP_MULTICAST
          intend to participate in the MBONE, a high bandwidth network on top
          of the Internet which carries audio and video broadcasts. More
          information about the MBONE is on the WWW at
-         <http://www.savetz.com/mbone/>. Information about the multicast
-         capabilities of the various network cards is contained in
-         <file:Documentation/networking/multicast.txt>. For most people, it's
-         safe to say N.
+         <http://www.savetz.com/mbone/>. For most people, it's safe to say N.
 
 config IP_ADVANCED_ROUTER
        bool "IP: advanced router"
@@ -223,10 +220,8 @@ config IP_MROUTE
          packets that have several destination addresses. It is needed on the
          MBONE, a high bandwidth network on top of the Internet which carries
          audio and video broadcasts. In order to do that, you would most
-         likely run the program mrouted. Information about the multicast
-         capabilities of the various network cards is contained in
-         <file:Documentation/networking/multicast.txt>. If you haven't heard
-         about it, you don't need it.
+         likely run the program mrouted. If you haven't heard about it, you
+         don't need it.
 
 config IP_MROUTE_MULTIPLE_TABLES
        bool "IP: multicast policy routing"
index 089cb9f363871327b027d8ce9246c830256cd456..86ded0bac9c7dad588ecb1f2ab382f1071ecbd0d 100644 (file)
@@ -8,10 +8,10 @@ obj-y     := route.o inetpeer.o protocol.o \
             inet_timewait_sock.o inet_connection_sock.o \
             tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
             tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
-            datagram.o raw.o udp.o udplite.o \
-            arp.o icmp.o devinet.o af_inet.o  igmp.o \
+            tcp_offload.o datagram.o raw.o udp.o udplite.o \
+            udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
             fib_frontend.o fib_semantics.o fib_trie.o \
-            inet_fragment.o ping.o
+            inet_fragment.o ping.o ip_tunnel_core.o
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
index d01be2a3ae53170c1075a57a32f2b11e1e75885b..b4d0be2b7ce94ada2582bd6faf0d1dd29a8920f4 100644 (file)
@@ -1295,6 +1295,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                       SKB_GSO_GRE |
                       SKB_GSO_TCPV6 |
                       SKB_GSO_UDP_TUNNEL |
+                      SKB_GSO_MPLS |
                       0)))
                goto out;
 
@@ -1384,7 +1385,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                goto out_unlock;
 
        id = ntohl(*(__be32 *)&iph->id);
-       flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
+       flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
        id >>= 16;
 
        for (p = *head; p; p = p->next) {
@@ -1406,6 +1407,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
                        (iph->tos ^ iph2->tos) |
+                       (__force int)((iph->frag_off ^ iph2->frag_off) & htons(IP_DF)) |
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
                NAPI_GRO_CB(p)->flush |= flush;
@@ -1557,15 +1559,6 @@ static const struct net_protocol tcp_protocol = {
        .netns_ok       =       1,
 };
 
-static const struct net_offload tcp_offload = {
-       .callbacks = {
-               .gso_send_check =       tcp_v4_gso_send_check,
-               .gso_segment    =       tcp_tso_segment,
-               .gro_receive    =       tcp4_gro_receive,
-               .gro_complete   =       tcp4_gro_complete,
-       },
-};
-
 static const struct net_protocol udp_protocol = {
        .handler =      udp_rcv,
        .err_handler =  udp_err,
@@ -1573,13 +1566,6 @@ static const struct net_protocol udp_protocol = {
        .netns_ok =     1,
 };
 
-static const struct net_offload udp_offload = {
-       .callbacks = {
-               .gso_send_check = udp4_ufo_send_check,
-               .gso_segment = udp4_ufo_fragment,
-       },
-};
-
 static const struct net_protocol icmp_protocol = {
        .handler =      icmp_rcv,
        .err_handler =  icmp_err,
@@ -1679,10 +1665,10 @@ static int __init ipv4_offload_init(void)
        /*
         * Add offloads
         */
-       if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0)
+       if (udpv4_offload_init() < 0)
                pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
-       if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0)
-               pr_crit("%s: Cannot add TCP protocol offlaod\n", __func__);
+       if (tcpv4_offload_init() < 0)
+               pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
 
        dev_add_offload(&ip_packet_offload);
        return 0;
index 2e7f1948216fe8ade7d57aa3baa652d14d6b9b7b..717902669d2f2ef34714c215c4695ffe59ddd283 100644 (file)
@@ -419,12 +419,9 @@ static void ah4_err(struct sk_buff *skb, u32 info)
        if (!x)
                return;
 
-       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
-               atomic_inc(&flow_cache_genid);
-               rt_genid_bump(net);
-
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
-       } else
+       else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
        xfrm_state_put(x);
 }
index 247ec1951c35bed69d2182c6119085b9a7b2da96..4429b013f26946e03b687c25158267494542de9c 100644 (file)
@@ -1234,13 +1234,19 @@ out:
 static int arp_netdev_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_change_info *change_info;
 
        switch (event) {
        case NETDEV_CHANGEADDR:
                neigh_changeaddr(&arp_tbl, dev);
                rt_cache_flush(dev_net(dev));
                break;
+       case NETDEV_CHANGE:
+               change_info = ptr;
+               if (change_info->flags_changed & IFF_NOARP)
+                       neigh_changeaddr(&arp_tbl, dev);
+               break;
        default:
                break;
        }
index dfc39d4d48b7471fc83035746026fa14d1dcf497..8d48c392adccec6dc1f2ca8e3c8fd5a6750283e5 100644 (file)
@@ -215,6 +215,7 @@ void in_dev_finish_destroy(struct in_device *idev)
 
        WARN_ON(idev->ifa_list);
        WARN_ON(idev->mc_list);
+       kfree(rcu_dereference_protected(idev->mc_hash, 1));
 #ifdef NET_REFCNT_DEBUG
        pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
 #endif
@@ -1333,7 +1334,7 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev,
 static int inetdev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct in_device *in_dev = __in_dev_get_rtnl(dev);
 
        ASSERT_RTNL();
@@ -1941,7 +1942,7 @@ static void inet_forward_change(struct net *net)
        }
 }
 
-static int devinet_conf_proc(ctl_table *ctl, int write,
+static int devinet_conf_proc(struct ctl_table *ctl, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
@@ -1984,7 +1985,7 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
        return ret;
 }
 
-static int devinet_sysctl_forward(ctl_table *ctl, int write,
+static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
                                  void __user *buffer,
                                  size_t *lenp, loff_t *ppos)
 {
@@ -2027,7 +2028,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
        return ret;
 }
 
-static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
 {
index 4cfe34d4cc967a94ed15f2deef3d249d2429e846..ab3d814bc80af8f377da971af189a49ae4f2f094 100644 (file)
@@ -502,12 +502,9 @@ static void esp4_err(struct sk_buff *skb, u32 info)
        if (!x)
                return;
 
-       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
-               atomic_inc(&flow_cache_genid);
-               rt_genid_bump(net);
-
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
-       } else
+       else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
        xfrm_state_put(x);
 }
index c7629a209f9d84538b8048de89ce66aa3776a9bb..05a4888dede9868681651ddacffd68664d5933c6 100644 (file)
@@ -1038,7 +1038,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct in_device *in_dev;
        struct net *net = dev_net(dev);
 
index b2e805af9b87a03675d7bac1a7e210124757e566..ba4803e609b5fb0636e90d59099afc261868aa4f 100644 (file)
@@ -13,6 +13,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
+#include <linux/if.h>
+#include <linux/icmp.h>
 #include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <linux/skbuff.h>
 #include <net/protocol.h>
 #include <net/gre.h>
 
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/xfrm.h>
 
 static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
-static DEFINE_SPINLOCK(gre_proto_lock);
+static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 {
        if (version >= GREPROTO_MAX)
-               goto err_out;
-
-       spin_lock(&gre_proto_lock);
-       if (gre_proto[version])
-               goto err_out_unlock;
-
-       RCU_INIT_POINTER(gre_proto[version], proto);
-       spin_unlock(&gre_proto_lock);
-       return 0;
+               return -EINVAL;
 
-err_out_unlock:
-       spin_unlock(&gre_proto_lock);
-err_out:
-       return -1;
+       return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?
+               0 : -EBUSY;
 }
 EXPORT_SYMBOL_GPL(gre_add_protocol);
 
 int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 {
+       int ret;
+
        if (version >= GREPROTO_MAX)
-               goto err_out;
-
-       spin_lock(&gre_proto_lock);
-       if (rcu_dereference_protected(gre_proto[version],
-                       lockdep_is_held(&gre_proto_lock)) != proto)
-               goto err_out_unlock;
-       RCU_INIT_POINTER(gre_proto[version], NULL);
-       spin_unlock(&gre_proto_lock);
+               return -EINVAL;
+
+       ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
+               0 : -EBUSY;
+
+       if (ret)
+               return ret;
+
        synchronize_rcu();
        return 0;
-
-err_out_unlock:
-       spin_unlock(&gre_proto_lock);
-err_out:
-       return -1;
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
+void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+                     int hdr_len)
+{
+       struct gre_base_hdr *greh;
+
+       skb_push(skb, hdr_len);
+
+       greh = (struct gre_base_hdr *)skb->data;
+       greh->flags = tnl_flags_to_gre_flags(tpi->flags);
+       greh->protocol = tpi->proto;
+
+       if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
+               __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+               if (tpi->flags&TUNNEL_SEQ) {
+                       *ptr = tpi->seq;
+                       ptr--;
+               }
+               if (tpi->flags&TUNNEL_KEY) {
+                       *ptr = tpi->key;
+                       ptr--;
+               }
+               if (tpi->flags&TUNNEL_CSUM &&
+                   !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) {
+                       *ptr = 0;
+                       *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+                                                                skb->len, 0));
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(gre_build_header);
+
+struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
+{
+       int err;
+
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
+       if (skb_is_gso(skb)) {
+               err = skb_unclone(skb, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto error;
+               skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
+               return skb;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
+               err = skb_checksum_help(skb);
+               if (unlikely(err))
+                       goto error;
+       } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+               skb->ip_summed = CHECKSUM_NONE;
+
+       return skb;
+error:
+       kfree_skb(skb);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(gre_handle_offloads);
+
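Taken together, a tunnel transmit path is expected to call the two exported helpers in this order. The sketch below only mirrors __gre_xmit() further down in this diff and omits sequence numbers; it is not part of the patch:

static netdev_tx_t demo_gre_xmit(struct sk_buff *skb, struct net_device *dev,
				 struct ip_tunnel *tunnel,
				 const struct iphdr *tnl_params)
{
	struct tnl_ptk_info tpi = {
		.flags	= tunnel->parms.o_flags,
		.proto	= htons(ETH_P_IP),
		.key	= tunnel->parms.o_key,
	};

	/* May checksum, mark SKB_GSO_GRE, or free the skb on error. */
	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	gre_build_header(skb, &tpi, tunnel->hlen);	/* push the GRE header */
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
	return NETDEV_TX_OK;
}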
+static __sum16 check_checksum(struct sk_buff *skb)
+{
+       __sum16 csum = 0;
+
+       switch (skb->ip_summed) {
+       case CHECKSUM_COMPLETE:
+               csum = csum_fold(skb->csum);
+
+               if (!csum)
+                       break;
+               /* Fall through. */
+
+       case CHECKSUM_NONE:
+               skb->csum = 0;
+               csum = __skb_checksum_complete(skb);
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               break;
+       }
+
+       return csum;
+}
+
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+                           bool *csum_err)
+{
+       unsigned int ip_hlen = ip_hdrlen(skb);
+       const struct gre_base_hdr *greh;
+       __be32 *options;
+       int hdr_len;
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+               return -EINVAL;
+
+       greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
+       if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+               return -EINVAL;
+
+       tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+       hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+       if (!pskb_may_pull(skb, hdr_len))
+               return -EINVAL;
+
+       greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
+       tpi->proto = greh->protocol;
+
+       options = (__be32 *)(greh + 1);
+       if (greh->flags & GRE_CSUM) {
+               if (check_checksum(skb)) {
+                       *csum_err = true;
+                       return -EINVAL;
+               }
+               options++;
+       }
+
+       if (greh->flags & GRE_KEY) {
+               tpi->key = *options;
+               options++;
+       } else
+               tpi->key = 0;
+
+       if (unlikely(greh->flags & GRE_SEQ)) {
+               tpi->seq = *options;
+               options++;
+       } else
+               tpi->seq = 0;
+
+       /* WCCP version 1 and 2 protocol decoding.
+        * - Change protocol to IP
+        * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
+        */
+       if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+               tpi->proto = htons(ETH_P_IP);
+               if ((*(u8 *)options & 0xF0) != 0x40) {
+                       hdr_len += 4;
+                       if (!pskb_may_pull(skb, hdr_len))
+                               return -EINVAL;
+               }
+       }
+
+       return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+}
+
+static int gre_cisco_rcv(struct sk_buff *skb)
+{
+       struct tnl_ptk_info tpi;
+       int i;
+       bool csum_err = false;
+
+       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+               goto drop;
+
+       rcu_read_lock();
+       for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
+               struct gre_cisco_protocol *proto;
+               int ret;
+
+               proto = rcu_dereference(gre_cisco_proto_list[i]);
+               if (!proto)
+                       continue;
+               ret = proto->handler(skb, &tpi);
+               if (ret == PACKET_RCVD) {
+                       rcu_read_unlock();
+                       return 0;
+               }
+       }
+       rcu_read_unlock();
+
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+static void gre_cisco_err(struct sk_buff *skb, u32 info)
+{
+       /* All the routers (except for Linux) return only
+        * 8 bytes of packet payload. It means that precise relaying of
+        * ICMP in the real Internet is absolutely infeasible.
+        *
+        * Moreover, Cisco "wise men" put the GRE key in the third word
+        * of the GRE header. This makes it impossible to maintain even
+        * soft state for keyed GRE tunnels with checksum enabled. Tell
+        * them "thank you".
+        *
+        * Well, I wonder, rfc1812 was written by a Cisco employee, so
+        * what the hell makes these idiots break standards established
+        * by themselves?
+        */
+
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
+       struct tnl_ptk_info tpi;
+       bool csum_err = false;
+       int i;
+
+       if (parse_gre_header(skb, &tpi, &csum_err)) {
+               if (!csum_err)          /* ignore csum errors. */
+                       return;
+       }
+
+       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+                               skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+               return;
+       }
+       if (type == ICMP_REDIRECT) {
+               ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
+                               IPPROTO_GRE, 0);
+               return;
+       }
+
+       rcu_read_lock();
+       for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
+               struct gre_cisco_protocol *proto;
+
+               proto = rcu_dereference(gre_cisco_proto_list[i]);
+               if (!proto)
+                       continue;
+
+               if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
+                       goto out;
+
+       }
+out:
+       rcu_read_unlock();
+}
+
 static int gre_rcv(struct sk_buff *skb)
 {
        const struct gre_protocol *proto;
@@ -220,27 +441,68 @@ static const struct net_offload gre_offload = {
        },
 };
 
+static const struct gre_protocol ipgre_protocol = {
+       .handler     = gre_cisco_rcv,
+       .err_handler = gre_cisco_err,
+};
+
+int gre_cisco_register(struct gre_cisco_protocol *newp)
+{
+       struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
+                                           &gre_cisco_proto_list[newp->priority];
+
+       return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
+}
+EXPORT_SYMBOL_GPL(gre_cisco_register);
+
+int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
+{
+       struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
+                                           &gre_cisco_proto_list[del_proto->priority];
+       int ret;
+
+       ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;
+
+       if (ret)
+               return ret;
+
+       synchronize_net();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gre_cisco_unregister);
+
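Registration for a hypothetical additional GRE consumer would look like the sketch below; the names are invented, and the only real user added in this diff is ip_gre, registered at priority 0:

static int demo_gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	return PACKET_REJECT;	/* not ours; gre_cisco_rcv() keeps looking */
}

static int demo_gre_err(struct sk_buff *skb, u32 info,
			const struct tnl_ptk_info *tpi)
{
	return PACKET_RCVD;
}

static struct gre_cisco_protocol demo_gre_proto = {
	.handler	= demo_gre_rcv,
	.err_handler	= demo_gre_err,
	.priority	= 1,	/* slot in gre_cisco_proto_list[], < GRE_IP_PROTO_MAX */
};

/* module init/exit would call:
 *	gre_cisco_register(&demo_gre_proto);
 *	gre_cisco_unregister(&demo_gre_proto);
 */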
 static int __init gre_init(void)
 {
        pr_info("GRE over IPv4 demultiplexor driver\n");
 
        if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
                pr_err("can't add protocol\n");
-               return -EAGAIN;
+               goto err;
+       }
+
+       if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
+               pr_info("%s: can't add ipgre handler\n", __func__);
+               goto err_gre;
        }
 
        if (inet_add_offload(&gre_offload, IPPROTO_GRE)) {
                pr_err("can't add protocol offload\n");
-               inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
-               return -EAGAIN;
+               goto err_gso;
        }
 
        return 0;
+err_gso:
+       gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
+err_gre:
+       inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+err:
+       return -EAGAIN;
 }
 
 static void __exit gre_exit(void)
 {
        inet_del_offload(&gre_offload, IPPROTO_GRE);
+       gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
        inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
 }
 
@@ -250,4 +512,3 @@ module_exit(gre_exit);
 MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
 MODULE_LICENSE("GPL");
-
index 76e10b47e053fd7fc2cbc7fa8d231f54704d1b25..5f7d11a458713f9c755dd1a1a40289b180a3e041 100644 (file)
@@ -482,7 +482,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 {
        struct iphdr *iph;
        int room;
-       struct icmp_bxm icmp_param;
+       struct icmp_bxm *icmp_param;
        struct rtable *rt = skb_rtable(skb_in);
        struct ipcm_cookie ipc;
        struct flowi4 fl4;
@@ -503,7 +503,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        iph = ip_hdr(skb_in);
 
        if ((u8 *)iph < skb_in->head ||
-           (skb_in->network_header + sizeof(*iph)) > skb_in->tail)
+           (skb_network_header(skb_in) + sizeof(*iph)) >
+           skb_tail_pointer(skb_in))
                goto out;
 
        /*
@@ -557,9 +558,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                }
        }
 
+       icmp_param = kmalloc(sizeof(*icmp_param), GFP_ATOMIC);
+       if (!icmp_param)
+               return;
+
        sk = icmp_xmit_lock(net);
        if (sk == NULL)
-               return;
+               goto out_free;
 
        /*
         *      Construct source address and options.
@@ -585,7 +590,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                                           IPTOS_PREC_INTERNETCONTROL) :
                                          iph->tos;
 
-       if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
+       if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
                goto out_unlock;
 
 
@@ -593,19 +598,19 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
         *      Prepare data for ICMP header.
         */
 
-       icmp_param.data.icmph.type       = type;
-       icmp_param.data.icmph.code       = code;
-       icmp_param.data.icmph.un.gateway = info;
-       icmp_param.data.icmph.checksum   = 0;
-       icmp_param.skb    = skb_in;
-       icmp_param.offset = skb_network_offset(skb_in);
+       icmp_param->data.icmph.type      = type;
+       icmp_param->data.icmph.code      = code;
+       icmp_param->data.icmph.un.gateway = info;
+       icmp_param->data.icmph.checksum  = 0;
+       icmp_param->skb   = skb_in;
+       icmp_param->offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
        ipc.addr = iph->saddr;
-       ipc.opt = &icmp_param.replyopts.opt;
+       ipc.opt = &icmp_param->replyopts.opt;
        ipc.tx_flags = 0;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
-                              type, code, &icmp_param);
+                              type, code, icmp_param);
        if (IS_ERR(rt))
                goto out_unlock;
 
@@ -617,19 +622,21 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        room = dst_mtu(&rt->dst);
        if (room > 576)
                room = 576;
-       room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
+       room -= sizeof(struct iphdr) + icmp_param->replyopts.opt.opt.optlen;
        room -= sizeof(struct icmphdr);
 
-       icmp_param.data_len = skb_in->len - icmp_param.offset;
-       if (icmp_param.data_len > room)
-               icmp_param.data_len = room;
-       icmp_param.head_len = sizeof(struct icmphdr);
+       icmp_param->data_len = skb_in->len - icmp_param->offset;
+       if (icmp_param->data_len > room)
+               icmp_param->data_len = room;
+       icmp_param->head_len = sizeof(struct icmphdr);
 
-       icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
+       icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
 ende:
        ip_rt_put(rt);
 out_unlock:
        icmp_xmit_unlock(sk);
+out_free:
+       kfree(icmp_param);
 out:;
 }
 EXPORT_SYMBOL(icmp_send);
@@ -657,7 +664,8 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
 }
 
 /*
- *     Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
+ *     Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, ICMP_QUENCH, and
+ *     ICMP_PARAMETERPROB.
  */
 
 static void icmp_unreach(struct sk_buff *skb)
@@ -939,7 +947,8 @@ error:
 void icmp_err(struct sk_buff *skb, u32 info)
 {
        struct iphdr *iph = (struct iphdr *)skb->data;
-       struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+       int offset = iph->ihl<<2;
+       struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
        int type = icmp_hdr(skb)->type;
        int code = icmp_hdr(skb)->code;
        struct net *net = dev_net(skb->dev);
@@ -949,7 +958,7 @@ void icmp_err(struct sk_buff *skb, u32 info)
         * triggered by ICMP_ECHOREPLY which sent from kernel.
         */
        if (icmph->type != ICMP_ECHOREPLY) {
-               ping_err(skb, info);
+               ping_err(skb, offset, info);
                return;
        }
 
index d8c232794bcb4bc995f850cabf42ac7bc8eac37a..cd71190d29625c11fe25aa603026cc98e0e2c34b 100644 (file)
@@ -363,7 +363,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 static int igmpv3_sendpack(struct sk_buff *skb)
 {
        struct igmphdr *pig = igmp_hdr(skb);
-       const int igmplen = skb->tail - skb->transport_header;
+       const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);
 
        pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
 
@@ -1217,6 +1217,57 @@ static void igmp_group_added(struct ip_mc_list *im)
  *     Multicast list managers
  */
 
+static u32 ip_mc_hash(const struct ip_mc_list *im)
+{
+       return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
+}
+
+static void ip_mc_hash_add(struct in_device *in_dev,
+                          struct ip_mc_list *im)
+{
+       struct ip_mc_list __rcu **mc_hash;
+       u32 hash;
+
+       mc_hash = rtnl_dereference(in_dev->mc_hash);
+       if (mc_hash) {
+               hash = ip_mc_hash(im);
+               im->next_hash = mc_hash[hash];
+               rcu_assign_pointer(mc_hash[hash], im);
+               return;
+       }
+
+       /* do not use a hash table for a small number of items */
+       if (in_dev->mc_count < 4)
+               return;
+
+       mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
+                         GFP_KERNEL);
+       if (!mc_hash)
+               return;
+
+       for_each_pmc_rtnl(in_dev, im) {
+               hash = ip_mc_hash(im);
+               im->next_hash = mc_hash[hash];
+               RCU_INIT_POINTER(mc_hash[hash], im);
+       }
+
+       rcu_assign_pointer(in_dev->mc_hash, mc_hash);
+}
+
+static void ip_mc_hash_remove(struct in_device *in_dev,
+                             struct ip_mc_list *im)
+{
+       struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
+       struct ip_mc_list *aux;
+
+       if (!mc_hash)
+               return;
+       mc_hash += ip_mc_hash(im);
+       while ((aux = rtnl_dereference(*mc_hash)) != im)
+               mc_hash = &aux->next_hash;
+       *mc_hash = im->next_hash;
+}
+
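The add/remove helpers above keep a per-device chain hash once a device has joined enough groups; the reader side (shown later in this diff in ip_check_mc_rcu()) picks a bucket the same way. A stripped-down lookup sketch, with rcu_read_lock() assumed held by the caller:

static struct ip_mc_list *demo_mc_lookup(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list __rcu **mc_hash = rcu_dereference(in_dev->mc_hash);
	struct ip_mc_list *im;

	if (!mc_hash)
		return NULL;	/* fewer than 4 groups: fall back to the linear walk */

	for (im = rcu_dereference(mc_hash[hash_32((__force u32)group, MC_HASH_SZ_LOG)]);
	     im; im = rcu_dereference(im->next_hash))
		if (im->multiaddr == group)
			return im;
	return NULL;
}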
 
 /*
  *     A socket has joined a multicast group on device dev.
@@ -1258,6 +1309,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        in_dev->mc_count++;
        rcu_assign_pointer(in_dev->mc_list, im);
 
+       ip_mc_hash_add(in_dev, im);
+
 #ifdef CONFIG_IP_MULTICAST
        igmpv3_del_delrec(in_dev, im->multiaddr);
 #endif
@@ -1314,6 +1367,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
             ip = &i->next_rcu) {
                if (i->multiaddr == addr) {
                        if (--i->users == 0) {
+                               ip_mc_hash_remove(in_dev, i);
                                *ip = i->next_rcu;
                                in_dev->mc_count--;
                                igmp_group_dropped(i);
@@ -1381,13 +1435,9 @@ void ip_mc_init_dev(struct in_device *in_dev)
 {
        ASSERT_RTNL();
 
-       in_dev->mc_tomb = NULL;
 #ifdef CONFIG_IP_MULTICAST
-       in_dev->mr_gq_running = 0;
        setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
                        (unsigned long)in_dev);
-       in_dev->mr_ifc_count = 0;
-       in_dev->mc_count     = 0;
        setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
                        (unsigned long)in_dev);
        in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
@@ -2321,12 +2371,25 @@ void ip_mc_drop_socket(struct sock *sk)
 int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
 {
        struct ip_mc_list *im;
+       struct ip_mc_list __rcu **mc_hash;
        struct ip_sf_list *psf;
        int rv = 0;
 
-       for_each_pmc_rcu(in_dev, im) {
-               if (im->multiaddr == mc_addr)
-                       break;
+       mc_hash = rcu_dereference(in_dev->mc_hash);
+       if (mc_hash) {
+               u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);
+
+               for (im = rcu_dereference(mc_hash[hash]);
+                    im != NULL;
+                    im = rcu_dereference(im->next_hash)) {
+                       if (im->multiaddr == mc_addr)
+                               break;
+               }
+       } else {
+               for_each_pmc_rcu(in_dev, im) {
+                       if (im->multiaddr == mc_addr)
+                               break;
+               }
        }
        if (im && proto == IPPROTO_IGMP) {
                rv = 1;
index 7e06641e36ae785239f32b1d5afb80e504e0c60b..4b864430a8c42574b9d1e6a46d27c82fb5aaf0b4 100644 (file)
@@ -247,8 +247,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 {
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *qp;
-#ifdef CONFIG_SMP
-#endif
        unsigned int hash;
 
        read_lock(&f->lock); /* Protects against hash rebuild */
index c625e4dad4b05066d92fe02d1778b555234d0f2a..c326e869993a9c1382e9817eae36dc53cd6724d3 100644 (file)
@@ -121,103 +121,8 @@ static int ipgre_tunnel_init(struct net_device *dev);
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static __sum16 check_checksum(struct sk_buff *skb)
-{
-       __sum16 csum = 0;
-
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               csum = csum_fold(skb->csum);
-
-               if (!csum)
-                       break;
-               /* Fall through. */
-
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               csum = __skb_checksum_complete(skb);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-               break;
-       }
-
-       return csum;
-}
-
-static int ip_gre_calc_hlen(__be16 o_flags)
-{
-       int addend = 4;
-
-       if (o_flags&TUNNEL_CSUM)
-               addend += 4;
-       if (o_flags&TUNNEL_KEY)
-               addend += 4;
-       if (o_flags&TUNNEL_SEQ)
-               addend += 4;
-       return addend;
-}
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                           bool *csum_err, int *hdr_len)
-{
-       unsigned int ip_hlen = ip_hdrlen(skb);
-       const struct gre_base_hdr *greh;
-       __be32 *options;
-
-       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-               return -EINVAL;
-
-       greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
-       if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-               return -EINVAL;
-
-       tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-       *hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-       if (!pskb_may_pull(skb, *hdr_len))
-               return -EINVAL;
-
-       greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
-
-       tpi->proto = greh->protocol;
-
-       options = (__be32 *)(greh + 1);
-       if (greh->flags & GRE_CSUM) {
-               if (check_checksum(skb)) {
-                       *csum_err = true;
-                       return -EINVAL;
-               }
-               options++;
-       }
-
-       if (greh->flags & GRE_KEY) {
-               tpi->key = *options;
-               options++;
-       } else
-               tpi->key = 0;
-
-       if (unlikely(greh->flags & GRE_SEQ)) {
-               tpi->seq = *options;
-               options++;
-       } else
-               tpi->seq = 0;
-
-       /* WCCP version 1 and 2 protocol decoding.
-        * - Change protocol to IP
-        * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-        */
-       if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-               tpi->proto = htons(ETH_P_IP);
-               if ((*(u8 *)options & 0xF0) != 0x40) {
-                       *hdr_len += 4;
-                       if (!pskb_may_pull(skb, *hdr_len))
-                               return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-static void ipgre_err(struct sk_buff *skb, u32 info)
+static int ipgre_err(struct sk_buff *skb, u32 info,
+                    const struct tnl_ptk_info *tpi)
 {
 
        /* All the routers (except for Linux) return only
@@ -235,30 +140,22 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
           */
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
-       const struct iphdr *iph = (const struct iphdr *)skb->data;
+       const struct iphdr *iph;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
-       struct tnl_ptk_info tpi;
-       int hdr_len;
-       bool csum_err = false;
-
-       if (parse_gre_header(skb, &tpi, &csum_err, &hdr_len)) {
-               if (!csum_err)          /* ignore csum errors. */
-                       return;
-       }
 
        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
-               return;
+               return PACKET_RCVD;
 
        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
-                       return;
+                       return PACKET_RCVD;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -269,137 +166,61 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
                break;
        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
-                       return;
+                       return PACKET_RCVD;
                break;
 
        case ICMP_REDIRECT:
                break;
        }
 
-       if (tpi.proto == htons(ETH_P_TEB))
+       if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
        else
                itn = net_generic(net, ipgre_net_id);
 
-       t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
-                            iph->daddr, iph->saddr, tpi.key);
+       iph = (const struct iphdr *)skb->data;
+       t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
+                            iph->daddr, iph->saddr, tpi->key);
 
        if (t == NULL)
-               return;
+               return PACKET_REJECT;
 
-       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, IPPROTO_GRE, 0);
-               return;
-       }
-       if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-                             IPPROTO_GRE, 0);
-               return;
-       }
        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
-               return;
+               return PACKET_RCVD;
 
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               return;
+               return PACKET_RCVD;
 
        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
+       return PACKET_RCVD;
 }
 
-static int ipgre_rcv(struct sk_buff *skb)
+static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 {
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
        const struct iphdr *iph;
        struct ip_tunnel *tunnel;
-       struct tnl_ptk_info tpi;
-       int hdr_len;
-       bool csum_err = false;
 
-       if (parse_gre_header(skb, &tpi, &csum_err, &hdr_len) < 0)
-               goto drop;
-
-       if (tpi.proto == htons(ETH_P_TEB))
+       if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
        else
                itn = net_generic(net, ipgre_net_id);
 
        iph = ip_hdr(skb);
-       tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
-                                 iph->saddr, iph->daddr, tpi.key);
+       tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
+                                 iph->saddr, iph->daddr, tpi->key);
 
        if (tunnel) {
-               ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
-               return 0;
-       }
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-drop:
-       kfree_skb(skb);
-       return 0;
-}
-
-static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
-{
-       int err;
-
-       if (skb_is_gso(skb)) {
-               err = skb_unclone(skb, GFP_ATOMIC);
-               if (unlikely(err))
-                       goto error;
-               skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
-               return skb;
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-                  tunnel->parms.o_flags&TUNNEL_CSUM) {
-               err = skb_checksum_help(skb);
-               if (unlikely(err))
-                       goto error;
-       } else if (skb->ip_summed != CHECKSUM_PARTIAL)
-               skb->ip_summed = CHECKSUM_NONE;
-
-       return skb;
-
-error:
-       kfree_skb(skb);
-       return ERR_PTR(err);
-}
-
-static struct sk_buff *gre_build_header(struct sk_buff *skb,
-                                       const struct tnl_ptk_info *tpi,
-                                       int hdr_len)
-{
-       struct gre_base_hdr *greh;
-
-       skb_push(skb, hdr_len);
-
-       greh = (struct gre_base_hdr *)skb->data;
-       greh->flags = tnl_flags_to_gre_flags(tpi->flags);
-       greh->protocol = tpi->proto;
-
-       if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
-               __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-               if (tpi->flags&TUNNEL_SEQ) {
-                       *ptr = tpi->seq;
-                       ptr--;
-               }
-               if (tpi->flags&TUNNEL_KEY) {
-                       *ptr = tpi->key;
-                       ptr--;
-               }
-               if (tpi->flags&TUNNEL_CSUM &&
-                   !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) {
-                       *(__sum16 *)ptr = 0;
-                       *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
-                                                                skb->len, 0));
-               }
+               ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+               return PACKET_RCVD;
        }
-
-       return skb;
+       return PACKET_REJECT;
 }
 
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -409,11 +230,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct tnl_ptk_info tpi;
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
-
        tpi.flags = tunnel->parms.o_flags;
        tpi.proto = proto;
        tpi.key = tunnel->parms.o_key;
@@ -422,13 +238,9 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
        tpi.seq = htonl(tunnel->o_seqno);
 
        /* Push GRE header. */
-       skb = gre_build_header(skb, &tpi, tunnel->hlen);
-       if (unlikely(!skb)) {
-               dev->stats.tx_dropped++;
-               return;
-       }
+       gre_build_header(skb, &tpi, tunnel->hlen);
 
-       ip_tunnel_xmit(skb, dev, tnl_params);
+       ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
@@ -437,7 +249,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *tnl_params;
 
-       skb = handle_offloads(tunnel, skb);
+       skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
        if (IS_ERR(skb))
                goto out;
 
@@ -476,7 +288,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
-       skb = handle_offloads(tunnel, skb);
+       skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
        if (IS_ERR(skb))
                goto out;
 
@@ -707,9 +519,10 @@ static int ipgre_tunnel_init(struct net_device *dev)
        return ip_tunnel_init(dev);
 }
 
-static const struct gre_protocol ipgre_protocol = {
-       .handler     = ipgre_rcv,
-       .err_handler = ipgre_err,
+static struct gre_cisco_protocol ipgre_protocol = {
+       .handler        = ipgre_rcv,
+       .err_handler    = ipgre_err,
+       .priority       = 0,
 };
 
 static int __net_init ipgre_init_net(struct net *net)
@@ -977,7 +790,7 @@ static int __init ipgre_init(void)
        if (err < 0)
                goto pnet_tap_faied;
 
-       err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
+       err = gre_cisco_register(&ipgre_protocol);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
@@ -996,7 +809,7 @@ static int __init ipgre_init(void)
 tap_ops_failed:
        rtnl_link_unregister(&ipgre_link_ops);
 rtnl_link_failed:
-       gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
+       gre_cisco_unregister(&ipgre_protocol);
 add_proto_failed:
        unregister_pernet_device(&ipgre_tap_net_ops);
 pnet_tap_faied:
@@ -1008,8 +821,7 @@ static void __exit ipgre_fini(void)
 {
        rtnl_link_unregister(&ipgre_tap_ops);
        rtnl_link_unregister(&ipgre_link_ops);
-       if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
-               pr_info("%s: can't remove protocol\n", __func__);
+       gre_cisco_unregister(&ipgre_protocol);
        unregister_pernet_device(&ipgre_tap_net_ops);
        unregister_pernet_device(&ipgre_net_ops);
 }
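
The hunks above move ip_gre.c off the fixed GREPROTO_CISCO slot (gre_add_protocol/gre_del_protocol) and onto the new gre_cisco_register()/gre_cisco_unregister() interface with its priority field. A minimal registration sketch under that interface follows; the handler prototypes are inferred from the tnl_ptk_info-based receive path above, and the module skeleton and handler bodies are illustrative only, not part of this commit.

static int my_gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
        /* Claim matching packets, or let a lower-priority handler try. */
        return PACKET_REJECT;
}

static int my_gre_err(struct sk_buff *skb, u32 info,
                      const struct tnl_ptk_info *tpi)
{
        /* React to ICMP errors reported against our tunnels. */
        return PACKET_REJECT;
}

static struct gre_cisco_protocol my_gre_proto = {
        .handler        = my_gre_rcv,
        .err_handler    = my_gre_err,
        .priority       = 1,    /* ordering among registered handlers */
};

static int __init my_gre_init(void)
{
        return gre_cisco_register(&my_gre_proto);
}

static void __exit my_gre_exit(void)
{
        gre_cisco_unregister(&my_gre_proto);
}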
index 147abf5275aa6604df58c3b022eaeceaee10df4f..4bcabf3ab4cad3bdc43f5b9ed33eba9c1357557d 100644 (file)
@@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
 /* Generate a checksum for an outgoing IP datagram. */
-__inline__ void ip_send_check(struct iphdr *iph)
+void ip_send_check(struct iphdr *iph)
 {
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
index e4147ec1665a7af9c9661a12712babdf4bfd29ea..394cebc96d22237784e6ccd35c3f1c6a305937e0 100644 (file)
@@ -304,6 +304,7 @@ static struct net_device *__ip_tunnel_create(struct net *net,
 
        tunnel = netdev_priv(dev);
        tunnel->parms = *parms;
+       tunnel->net = net;
 
        err = register_netdevice(dev);
        if (err)
@@ -408,13 +409,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
        const struct iphdr *iph = ip_hdr(skb);
        int err;
 
-       secpath_reset(skb);
-
-       skb->protocol = tpi->proto;
-
-       skb->mac_header = skb->network_header;
-       __pskb_pull(skb, tunnel->hlen);
-       skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
 #ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
                /* Looped back packet, drop it! */
@@ -442,23 +436,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }
 
-       /* Warning: All skb pointers will be invalidated! */
-       if (tunnel->dev->type == ARPHRD_ETHER) {
-               if (!pskb_may_pull(skb, ETH_HLEN)) {
-                       tunnel->dev->stats.rx_length_errors++;
-                       tunnel->dev->stats.rx_errors++;
-                       goto drop;
-               }
-
-               iph = ip_hdr(skb);
-               skb->protocol = eth_type_trans(skb, tunnel->dev);
-               skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-       }
-
-       skb->pkt_type = PACKET_HOST;
-       __skb_tunnel_rx(skb, tunnel->dev);
-
-       skb_reset_network_header(skb);
        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
@@ -477,6 +454,15 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);
 
+       if (tunnel->net != dev_net(tunnel->dev))
+               skb_scrub_packet(skb);
+
+       if (tunnel->dev->type == ARPHRD_ETHER) {
+               skb->protocol = eth_type_trans(skb, tunnel->dev);
+               skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+       } else {
+               skb->dev = tunnel->dev;
+       }
        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;
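
With the header pull and the protocol/mac-header fixups removed from ip_tunnel_rcv() above, callers are now expected to strip the outer header first, via iptunnel_pull_header() from the new ip_tunnel_core.c further down, and only then hand the skb over. A rough sketch of that ordering, assuming a wrapper of this shape (the wrapper itself is illustrative):

static int my_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                         const struct tnl_ptk_info *tpi, int hdr_len)
{
        /* Strip the tunnel header; this also resets skb->protocol,
         * rxhash, dst and pkt_type as done in iptunnel_pull_header(). */
        if (iptunnel_pull_header(skb, hdr_len, tpi->proto)) {
                kfree_skb(skb);
                return 0;
        }

        /* ip_tunnel_rcv() now only validates, scrubs, updates stats
         * and delivers. */
        return ip_tunnel_rcv(tunnel, skb, tpi, true /* log_ecn_error */);
}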
 
@@ -487,19 +473,18 @@ drop:
 EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
 
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
-                   const struct iphdr *tnl_params)
+                   const struct iphdr *tnl_params, const u8 protocol)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *inner_iph;
-       struct iphdr *iph;
        struct flowi4 fl4;
        u8     tos, ttl;
        __be16 df;
        struct rtable *rt;              /* Route to the other host */
-       struct net_device *tdev;        /* Device to other host */
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int mtu;
+       int err;
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 
@@ -560,8 +545,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
        }
 
-       rt = ip_route_output_tunnel(dev_net(dev), &fl4,
-                                   tunnel->parms.iph.protocol,
+       rt = ip_route_output_tunnel(tunnel->net, &fl4,
+                                   protocol,
                                    dst, tnl_params->saddr,
                                    tunnel->parms.o_key,
                                    RT_TOS(tos),
@@ -570,14 +555,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
-       tdev = rt->dst.dev;
-
-       if (tdev == dev) {
+       if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }
-
        df = tnl_params->frag_off;
 
        if (df)
@@ -595,6 +577,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                if (!skb_is_gso(skb) &&
                    (inner_iph->frag_off&htons(IP_DF)) &&
                     mtu < ntohs(inner_iph->tot_len)) {
+                       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        ip_rt_put(rt);
                        goto tx_error;
@@ -623,6 +606,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 #endif
 
+       if (tunnel->net != dev_net(dev))
+               skb_scrub_packet(skb);
+
        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
@@ -645,8 +631,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }
 
-       max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
-                                              + rt->dst.header_len;
+       max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+                       + rt->dst.header_len;
        if (max_headroom > dev->needed_headroom) {
                dev->needed_headroom = max_headroom;
                if (skb_cow_head(skb, dev->needed_headroom)) {
@@ -656,28 +642,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       skb_dst_drop(skb);
-       skb_dst_set(skb, &rt->dst);
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-
-       /* Push down and install the IP header. */
-       skb_push(skb, sizeof(struct iphdr));
-       skb_reset_network_header(skb);
-
-       iph = ip_hdr(skb);
-       inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       err = iptunnel_xmit(dev_net(dev), rt, skb,
+                           fl4.saddr, fl4.daddr, protocol,
+                           ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df);
+       iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 
-       iph->version    =       4;
-       iph->ihl        =       sizeof(struct iphdr) >> 2;
-       iph->frag_off   =       df;
-       iph->protocol   =       tnl_params->protocol;
-       iph->tos        =       ip_tunnel_ecn_encap(tos, inner_iph, skb);
-       iph->daddr      =       fl4.daddr;
-       iph->saddr      =       fl4.saddr;
-       iph->ttl        =       ttl;
-       tunnel_ip_select_ident(skb, inner_iph, &rt->dst);
-
-       iptunnel_xmit(skb, dev);
        return;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -853,7 +822,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
 
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                                  struct rtnl_link_ops *ops, char *devname)
 {
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
@@ -899,7 +868,7 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
                unregister_netdevice_queue(itn->fb_tunnel_dev, head);
 }
 
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
 {
        LIST_HEAD(list);
 
@@ -926,6 +895,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
        if (ip_tunnel_find(itn, p, dev->type))
                return -EEXIST;
 
+       nt->net = net;
        nt->parms = *p;
        err = register_netdevice(dev);
        if (err)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
new file mode 100644 (file)
index 0000000..7167b08
--- /dev/null
+++ b/net/ipv4/ip_tunnel_core.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/protocol.h>
+#include <net/ip_tunnels.h>
+#include <net/arp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
+
+int iptunnel_xmit(struct net *net, struct rtable *rt,
+                 struct sk_buff *skb,
+                 __be32 src, __be32 dst, __u8 proto,
+                 __u8 tos, __u8 ttl, __be16 df)
+{
+       int pkt_len = skb->len;
+       struct iphdr *iph;
+       int err;
+
+       nf_reset(skb);
+       secpath_reset(skb);
+       skb->rxhash = 0;
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
+       /* Push down and install the IP header. */
+       __skb_push(skb, sizeof(struct iphdr));
+       skb_reset_network_header(skb);
+
+       iph = ip_hdr(skb);
+
+       iph->version    =       4;
+       iph->ihl        =       sizeof(struct iphdr) >> 2;
+       iph->frag_off   =       df;
+       iph->protocol   =       proto;
+       iph->tos        =       tos;
+       iph->daddr      =       dst;
+       iph->saddr      =       src;
+       iph->ttl        =       ttl;
+       tunnel_ip_select_ident(skb,
+                              (const struct iphdr *)skb_inner_network_header(skb),
+                              &rt->dst);
+
+       err = ip_local_out(skb);
+       if (unlikely(net_xmit_eval(err)))
+               pkt_len = 0;
+       return pkt_len;
+}
+EXPORT_SYMBOL_GPL(iptunnel_xmit);
+
+int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+{
+       if (unlikely(!pskb_may_pull(skb, hdr_len)))
+               return -ENOMEM;
+
+       skb_pull_rcsum(skb, hdr_len);
+
+       if (inner_proto == htons(ETH_P_TEB)) {
+               struct ethhdr *eh = (struct ethhdr *)skb->data;
+
+               if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+                       return -ENOMEM;
+
+               if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+                       skb->protocol = eh->h_proto;
+               else
+                       skb->protocol = htons(ETH_P_802_2);
+
+       } else {
+               skb->protocol = inner_proto;
+       }
+
+       nf_reset(skb);
+       secpath_reset(skb);
+       if (!skb->l4_rxhash)
+               skb->rxhash = 0;
+       skb_dst_drop(skb);
+       skb->vlan_tci = 0;
+       skb_set_queue_mapping(skb, 0);
+       skb->pkt_type = PACKET_HOST;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(iptunnel_pull_header);
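
A hedged usage sketch for the transmit helper this new file exports, mirroring the call the reworked ip_tunnel_xmit() above now makes; the wrapper, its parameters and the choice of IPPROTO_GRE are assumptions for illustration only.

static void my_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                           struct rtable *rt, struct flowi4 *fl4,
                           u8 tos, u8 ttl, __be16 df)
{
        int err;

        /* The caller must already hold a route for the outer header (rt)
         * and have reserved enough headroom for struct iphdr. */
        err = iptunnel_xmit(dev_net(dev), rt, skb,
                            fl4->saddr, fl4->daddr, IPPROTO_GRE,
                            tos, ttl, df);

        /* iptunnel_xmit() returns the byte count, or 0 on a transmit
         * error, which feeds the same stats helper ip_tunnel.c uses. */
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
}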
index 9d2bdb2c1d3f631ab1ff8c6d9314005be0fa0c99..c118f6b576bbb50c6fa909bffa1f40930a7c2750 100644 (file)
@@ -361,8 +361,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        tunnel->err_count = 0;
        }
 
-       IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
-                             IPSKB_REROUTED);
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        nf_reset(skb);
index 59cb8c7690561f3fc44ecc382557408af53cb5fb..826be4cb482a29b401f2314da6581e1127a7a731 100644 (file)
@@ -47,12 +47,9 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
        if (!x)
                return;
 
-       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
-               atomic_inc(&flow_cache_genid);
-               rt_genid_bump(net);
-
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
-       } else
+       else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
        xfrm_state_put(x);
 }
index 77bfcce64fe568b3a162a9d36324061553275a5e..e6905fbda2a2981ebc5e9f347265507ec2154f27 100644 (file)
@@ -188,8 +188,12 @@ static int ipip_rcv(struct sk_buff *skb)
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
        struct ip_tunnel *tunnel;
-       const struct iphdr *iph = ip_hdr(skb);
+       const struct iphdr *iph;
 
+       if (iptunnel_pull_header(skb, 0, tpi.proto))
+               goto drop;
+
+       iph = ip_hdr(skb);
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                        iph->saddr, iph->daddr, 0);
        if (tunnel) {
@@ -222,7 +226,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                skb->encapsulation = 1;
        }
 
-       ip_tunnel_xmit(skb, dev, tiph);
+       ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
        return NETDEV_TX_OK;
 
 tx_error:
index 9d9610ae78553895e9f6ccb0ea8260fa4a66ac17..132a09664704ed73ed850e8961f11fa3e3641473 100644 (file)
@@ -980,7 +980,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
        /* Copy the IP header */
 
-       skb->network_header = skb->tail;
+       skb_set_network_header(skb, skb->len);
        skb_put(skb, ihl);
        skb_copy_to_linear_data(skb, pkt->data, ihl);
        ip_hdr(skb)->protocol = 0;      /* Flag to the kernel this is a route add */
@@ -1609,7 +1609,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 
 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct vif_device *v;
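
This hunk, like the masquerade one below, adapts to the new netdevice notifier convention in which the void *ptr argument carries a struct netdev_notifier_info rather than the net_device itself. A minimal handler written against that convention looks roughly like this (the event handling body is illustrative):

static int my_device_event(struct notifier_block *this,
                           unsigned long event, void *ptr)
{
        /* ptr wraps the device now; unwrap it with the accessor. */
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UNREGISTER)
                pr_debug("%s is going away\n", dev->name);

        return NOTIFY_DONE;
}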
index e7916c193932222b7538c104dc71eeb8b3f8b5f2..4e9028017428405ef2005f7558406c5ba3086a24 100644 (file)
@@ -111,7 +111,7 @@ config IP_NF_TARGET_REJECT
          To compile it as a module, choose M here.  If unsure, say N.
 
 config IP_NF_TARGET_ULOG
-       tristate "ULOG target support"
+       tristate "ULOG target support (obsolete)"
        default m if NETFILTER_ADVANCED=n
        ---help---
 
index 5d5d4d1be9c2c7c2c951943e5a7d7ce7a4b940d4..30e4de94056722535ee5304e93b49bfb6c5cf889 100644 (file)
@@ -108,7 +108,7 @@ static int masq_device_event(struct notifier_block *this,
                             unsigned long event,
                             void *ptr)
 {
-       const struct net_device *dev = ptr;
+       const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_DOWN) {
@@ -129,7 +129,10 @@ static int masq_inet_event(struct notifier_block *this,
                           void *ptr)
 {
        struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
-       return masq_device_event(this, event, dev);
+       struct netdev_notifier_info info;
+
+       netdev_notifier_info_init(&info, dev);
+       return masq_device_event(this, event, &info);
 }
 
 static struct notifier_block masq_dev_notifier = {
index f8a222cb64481c95e2f3b53207e3c6a3d372f4d2..57c671152c42dada0702b6c083fd7fb2ff22e585 100644 (file)
@@ -162,7 +162,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
        return skb;
 }
 
-static void ipt_ulog_packet(unsigned int hooknum,
+static void ipt_ulog_packet(struct net *net,
+                           unsigned int hooknum,
                            const struct sk_buff *skb,
                            const struct net_device *in,
                            const struct net_device *out,
@@ -174,7 +175,6 @@ static void ipt_ulog_packet(unsigned int hooknum,
        size_t size, copy_len;
        struct nlmsghdr *nlh;
        struct timeval tv;
-       struct net *net = dev_net(in ? in : out);
        struct ulog_net *ulog = ulog_pernet(net);
 
        /* ffs == find first bit set, necessary because userspace
@@ -231,8 +231,10 @@ static void ipt_ulog_packet(unsigned int hooknum,
        put_unaligned(tv.tv_usec, &pm->timestamp_usec);
        put_unaligned(skb->mark, &pm->mark);
        pm->hook = hooknum;
-       if (prefix != NULL)
-               strncpy(pm->prefix, prefix, sizeof(pm->prefix));
+       if (prefix != NULL) {
+               strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
+               pm->prefix[sizeof(pm->prefix) - 1] = '\0';
+       }
        else if (loginfo->prefix[0] != '\0')
                strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
        else
@@ -291,12 +293,15 @@ alloc_failure:
 static unsigned int
 ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
+       struct net *net = dev_net(par->in ? par->in : par->out);
+
+       ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
                        par->targinfo, NULL);
        return XT_CONTINUE;
 }
 
-static void ipt_logfn(u_int8_t pf,
+static void ipt_logfn(struct net *net,
+                     u_int8_t pf,
                      unsigned int hooknum,
                      const struct sk_buff *skb,
                      const struct net_device *in,
@@ -318,13 +323,19 @@ static void ipt_logfn(u_int8_t pf,
                strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
        }
 
-       ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+       ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
 }
 
 static int ulog_tg_check(const struct xt_tgchk_param *par)
 {
        const struct ipt_ulog_info *loginfo = par->targinfo;
 
+       if (!par->net->xt.ulog_warn_deprecated) {
+               pr_info("ULOG is deprecated and it will be removed soon, "
+                       "use NFLOG instead\n");
+               par->net->xt.ulog_warn_deprecated = true;
+       }
+
        if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
                pr_debug("prefix not null-terminated\n");
                return -EINVAL;
index 567d84168bd2e6e878b9a85733a649e3845eabdf..0a2e0e3e95ba0e64e17d4e4ba6a47a8201a35dfe 100644 (file)
@@ -223,7 +223,7 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
 static int log_invalid_proto_min = 0;
 static int log_invalid_proto_max = 255;
 
-static ctl_table ip_ct_sysctl_table[] = {
+static struct ctl_table ip_ct_sysctl_table[] = {
        {
                .procname       = "ip_conntrack_max",
                .maxlen         = sizeof(int),
index 7d93d62cd5fdae9f7f8a3f2e5c35f2a203df92e0..746427c9e7199513c12f4254ce048234f108a35d 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/netdevice.h>
 #include <net/snmp.h>
 #include <net/ip.h>
-#include <net/ipv6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/inet_common.h>
 #include <net/checksum.h>
 
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#endif
 
-static struct ping_table ping_table;
+
+struct ping_table ping_table;
+struct pingv6_ops pingv6_ops;
+EXPORT_SYMBOL_GPL(pingv6_ops);
 
 static u16 ping_port_rover;
 
@@ -58,6 +67,7 @@ static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int ma
        pr_debug("hash(%d) = %d\n", num, res);
        return res;
 }
+EXPORT_SYMBOL_GPL(ping_hash);
 
 static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
                                             struct net *net, unsigned int num)
@@ -65,7 +75,7 @@ static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
        return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
 }
 
-static int ping_v4_get_port(struct sock *sk, unsigned short ident)
+int ping_get_port(struct sock *sk, unsigned short ident)
 {
        struct hlist_nulls_node *node;
        struct hlist_nulls_head *hlist;
@@ -103,6 +113,10 @@ next_port:
                ping_portaddr_for_each_entry(sk2, node, hlist) {
                        isk2 = inet_sk(sk2);
 
+                       /* BUG? Why is this reuse and not reuseaddr? ping.c
+                        * doesn't turn off SO_REUSEADDR, and it doesn't expect
+                        * that other ping processes can steal its packets.
+                        */
                        if ((isk2->inet_num == ident) &&
                            (sk2 != sk) &&
                            (!sk2->sk_reuse || !sk->sk_reuse))
@@ -125,17 +139,18 @@ fail:
        write_unlock_bh(&ping_table.lock);
        return 1;
 }
+EXPORT_SYMBOL_GPL(ping_get_port);
 
-static void ping_v4_hash(struct sock *sk)
+void ping_hash(struct sock *sk)
 {
-       pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
+       pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
        BUG(); /* "Please do not press this button again." */
 }
 
-static void ping_v4_unhash(struct sock *sk)
+void ping_unhash(struct sock *sk)
 {
        struct inet_sock *isk = inet_sk(sk);
-       pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+       pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
        if (sk_hashed(sk)) {
                write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
@@ -146,31 +161,61 @@ static void ping_v4_unhash(struct sock *sk)
                write_unlock_bh(&ping_table.lock);
        }
 }
+EXPORT_SYMBOL_GPL(ping_unhash);
 
-static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
-                                  u16 ident, int dif)
+static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
 {
        struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
        struct sock *sk = NULL;
        struct inet_sock *isk;
        struct hlist_nulls_node *hnode;
+       int dif = skb->dev->ifindex;
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
+                        (int)ident, &ip_hdr(skb)->daddr, dif);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
+                        (int)ident, &ipv6_hdr(skb)->daddr, dif);
+#endif
+       }
 
-       pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
-                (int)ident, &daddr, dif);
        read_lock_bh(&ping_table.lock);
 
        ping_portaddr_for_each_entry(sk, hnode, hslot) {
                isk = inet_sk(sk);
 
-               pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
-                        (int)isk->inet_num, &isk->inet_rcv_saddr,
-                        sk->sk_bound_dev_if);
-
                pr_debug("iterate\n");
                if (isk->inet_num != ident)
                        continue;
-               if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
-                       continue;
+
+               if (skb->protocol == htons(ETH_P_IP) &&
+                   sk->sk_family == AF_INET) {
+                       pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
+                                (int) isk->inet_num, &isk->inet_rcv_saddr,
+                                sk->sk_bound_dev_if);
+
+                       if (isk->inet_rcv_saddr &&
+                           isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
+                               continue;
+#if IS_ENABLED(CONFIG_IPV6)
+               } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                          sk->sk_family == AF_INET6) {
+                       struct ipv6_pinfo *np = inet6_sk(sk);
+
+                       pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
+                                (int) isk->inet_num,
+                                &inet6_sk(sk)->rcv_saddr,
+                                sk->sk_bound_dev_if);
+
+                       if (!ipv6_addr_any(&np->rcv_saddr) &&
+                           !ipv6_addr_equal(&np->rcv_saddr,
+                                            &ipv6_hdr(skb)->daddr))
+                               continue;
+#endif
+               }
+
                if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
                        continue;
 
@@ -200,7 +245,7 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
 }
 
 
-static int ping_init_sock(struct sock *sk)
+int ping_init_sock(struct sock *sk)
 {
        struct net *net = sock_net(sk);
        kgid_t group = current_egid();
@@ -225,8 +270,9 @@ static int ping_init_sock(struct sock *sk)
 
        return -EACCES;
 }
+EXPORT_SYMBOL_GPL(ping_init_sock);
 
-static void ping_close(struct sock *sk, long timeout)
+void ping_close(struct sock *sk, long timeout)
 {
        pr_debug("ping_close(sk=%p,sk->num=%u)\n",
                 inet_sk(sk), inet_sk(sk)->inet_num);
@@ -234,36 +280,122 @@ static void ping_close(struct sock *sk, long timeout)
 
        sk_common_release(sk);
 }
+EXPORT_SYMBOL_GPL(ping_close);
+
+/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
+static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+                               struct sockaddr *uaddr, int addr_len) {
+       struct net *net = sock_net(sk);
+       if (sk->sk_family == AF_INET) {
+               struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+               int chk_addr_ret;
+
+               if (addr_len < sizeof(*addr))
+                       return -EINVAL;
+
+               pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+                        sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+
+               chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+
+               if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
+                       chk_addr_ret = RTN_LOCAL;
+
+               if ((sysctl_ip_nonlocal_bind == 0 &&
+                   isk->freebind == 0 && isk->transparent == 0 &&
+                    chk_addr_ret != RTN_LOCAL) ||
+                   chk_addr_ret == RTN_MULTICAST ||
+                   chk_addr_ret == RTN_BROADCAST)
+                       return -EADDRNOTAVAIL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (sk->sk_family == AF_INET6) {
+               struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
+               int addr_type, scoped, has_addr;
+               struct net_device *dev = NULL;
+
+               if (addr_len < sizeof(*addr))
+                       return -EINVAL;
+
+               pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+                        sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+
+               addr_type = ipv6_addr_type(&addr->sin6_addr);
+               scoped = __ipv6_addr_needs_scope_id(addr_type);
+               if ((addr_type != IPV6_ADDR_ANY &&
+                    !(addr_type & IPV6_ADDR_UNICAST)) ||
+                   (scoped && !addr->sin6_scope_id))
+                       return -EINVAL;
+
+               rcu_read_lock();
+               if (addr->sin6_scope_id) {
+                       dev = dev_get_by_index_rcu(net, addr->sin6_scope_id);
+                       if (!dev) {
+                               rcu_read_unlock();
+                               return -ENODEV;
+                       }
+               }
+               has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
+                                                   scoped);
+               rcu_read_unlock();
+
+               if (!(isk->freebind || isk->transparent || has_addr ||
+                     addr_type == IPV6_ADDR_ANY))
+                       return -EADDRNOTAVAIL;
+
+               if (scoped)
+                       sk->sk_bound_dev_if = addr->sin6_scope_id;
+#endif
+       } else {
+               return -EAFNOSUPPORT;
+       }
+       return 0;
+}
+
+static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
+{
+       if (saddr->sa_family == AF_INET) {
+               struct inet_sock *isk = inet_sk(sk);
+               struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
+               isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (saddr->sa_family == AF_INET6) {
+               struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               np->rcv_saddr = np->saddr = addr->sin6_addr;
+#endif
+       }
+}
 
+static void ping_clear_saddr(struct sock *sk, int dif)
+{
+       sk->sk_bound_dev_if = dif;
+       if (sk->sk_family == AF_INET) {
+               struct inet_sock *isk = inet_sk(sk);
+               isk->inet_rcv_saddr = isk->inet_saddr = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (sk->sk_family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+               memset(&np->saddr, 0, sizeof(np->saddr));
+#endif
+       }
+}
 /*
  * We need our own bind because there are no privileged id's == local ports.
  * Moreover, we don't allow binding to multi- and broadcast addresses.
  */
 
-static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-       struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
        struct inet_sock *isk = inet_sk(sk);
        unsigned short snum;
-       int chk_addr_ret;
        int err;
+       int dif = sk->sk_bound_dev_if;
 
-       if (addr_len < sizeof(struct sockaddr_in))
-               return -EINVAL;
-
-       pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
-                sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
-
-       chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
-       if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
-               chk_addr_ret = RTN_LOCAL;
-
-       if ((sysctl_ip_nonlocal_bind == 0 &&
-           isk->freebind == 0 && isk->transparent == 0 &&
-            chk_addr_ret != RTN_LOCAL) ||
-           chk_addr_ret == RTN_MULTICAST ||
-           chk_addr_ret == RTN_BROADCAST)
-               return -EADDRNOTAVAIL;
+       err = ping_check_bind_addr(sk, isk, uaddr, addr_len);
+       if (err)
+               return err;
 
        lock_sock(sk);
 
@@ -272,42 +404,50 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto out;
 
        err = -EADDRINUSE;
-       isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
-       snum = ntohs(addr->sin_port);
-       if (ping_v4_get_port(sk, snum) != 0) {
-               isk->inet_saddr = isk->inet_rcv_saddr = 0;
+       ping_set_saddr(sk, uaddr);
+       snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port);
+       if (ping_get_port(sk, snum) != 0) {
+               ping_clear_saddr(sk, dif);
                goto out;
        }
 
-       pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
+       pr_debug("after bind(): num = %d, dif = %d\n",
                 (int)isk->inet_num,
-                &isk->inet_rcv_saddr,
                 (int)sk->sk_bound_dev_if);
 
        err = 0;
-       if (isk->inet_rcv_saddr)
+       if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
+           (sk->sk_family == AF_INET6 &&
+            !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
        isk->inet_sport = htons(isk->inet_num);
        isk->inet_daddr = 0;
        isk->inet_dport = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+#endif
+
        sk_dst_reset(sk);
 out:
        release_sock(sk);
        pr_debug("ping_v4_bind -> %d\n", err);
        return err;
 }
+EXPORT_SYMBOL_GPL(ping_bind);
 
 /*
  * Is this a supported type of ICMP message?
  */
 
-static inline int ping_supported(int type, int code)
+static inline int ping_supported(int family, int type, int code)
 {
-       if (type == ICMP_ECHO && code == 0)
-               return 1;
-       return 0;
+       return (family == AF_INET && type == ICMP_ECHO && code == 0) ||
+              (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0);
 }
 
 /*
@@ -315,30 +455,42 @@ static inline int ping_supported(int type, int code)
  * sort of error condition.
  */
 
-static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-
-void ping_err(struct sk_buff *skb, u32 info)
+void ping_err(struct sk_buff *skb, int offset, u32 info)
 {
-       struct iphdr *iph = (struct iphdr *)skb->data;
-       struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+       int family;
+       struct icmphdr *icmph;
        struct inet_sock *inet_sock;
-       int type = icmp_hdr(skb)->type;
-       int code = icmp_hdr(skb)->code;
+       int type;
+       int code;
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        int harderr;
        int err;
 
+       if (skb->protocol == htons(ETH_P_IP)) {
+               family = AF_INET;
+               type = icmp_hdr(skb)->type;
+               code = icmp_hdr(skb)->code;
+               icmph = (struct icmphdr *)(skb->data + offset);
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               family = AF_INET6;
+               type = icmp6_hdr(skb)->icmp6_type;
+               code = icmp6_hdr(skb)->icmp6_code;
+               icmph = (struct icmphdr *) (skb->data + offset);
+       } else {
+               BUG();
+       }
+
        /* We assume the packet has already been checked by icmp_unreach */
 
-       if (!ping_supported(icmph->type, icmph->code))
+       if (!ping_supported(family, icmph->type, icmph->code))
                return;
 
-       pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
-                code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
+       pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n",
+                skb->protocol, type, code, ntohs(icmph->un.echo.id),
+                ntohs(icmph->un.echo.sequence));
 
-       sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
-                           ntohs(icmph->un.echo.id), skb->dev->ifindex);
+       sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk == NULL) {
                pr_debug("no socket, dropping\n");
                return; /* No socket for error */
@@ -349,72 +501,83 @@ void ping_err(struct sk_buff *skb, u32 info)
        harderr = 0;
        inet_sock = inet_sk(sk);
 
-       switch (type) {
-       default:
-       case ICMP_TIME_EXCEEDED:
-               err = EHOSTUNREACH;
-               break;
-       case ICMP_SOURCE_QUENCH:
-               /* This is not a real error but ping wants to see it.
-                * Report it with some fake errno. */
-               err = EREMOTEIO;
-               break;
-       case ICMP_PARAMETERPROB:
-               err = EPROTO;
-               harderr = 1;
-               break;
-       case ICMP_DEST_UNREACH:
-               if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
-                       ipv4_sk_update_pmtu(skb, sk, info);
-                       if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
-                               err = EMSGSIZE;
-                               harderr = 1;
-                               break;
+       if (skb->protocol == htons(ETH_P_IP)) {
+               switch (type) {
+               default:
+               case ICMP_TIME_EXCEEDED:
+                       err = EHOSTUNREACH;
+                       break;
+               case ICMP_SOURCE_QUENCH:
+                       /* This is not a real error but ping wants to see it.
+                        * Report it with some fake errno.
+                        */
+                       err = EREMOTEIO;
+                       break;
+               case ICMP_PARAMETERPROB:
+                       err = EPROTO;
+                       harderr = 1;
+                       break;
+               case ICMP_DEST_UNREACH:
+                       if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+                               ipv4_sk_update_pmtu(skb, sk, info);
+                               if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
+                                       err = EMSGSIZE;
+                                       harderr = 1;
+                                       break;
+                               }
+                               goto out;
                        }
-                       goto out;
-               }
-               err = EHOSTUNREACH;
-               if (code <= NR_ICMP_UNREACH) {
-                       harderr = icmp_err_convert[code].fatal;
-                       err = icmp_err_convert[code].errno;
+                       err = EHOSTUNREACH;
+                       if (code <= NR_ICMP_UNREACH) {
+                               harderr = icmp_err_convert[code].fatal;
+                               err = icmp_err_convert[code].errno;
+                       }
+                       break;
+               case ICMP_REDIRECT:
+                       /* See ICMP_SOURCE_QUENCH */
+                       ipv4_sk_redirect(skb, sk);
+                       err = EREMOTEIO;
+                       break;
                }
-               break;
-       case ICMP_REDIRECT:
-               /* See ICMP_SOURCE_QUENCH */
-               ipv4_sk_redirect(skb, sk);
-               err = EREMOTEIO;
-               break;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
+#endif
        }
 
        /*
         *      RFC1122: OK.  Passes ICMP errors back to application, as per
         *      4.1.3.3.
         */
-       if (!inet_sock->recverr) {
+       if ((family == AF_INET && !inet_sock->recverr) ||
+           (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
                if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                        goto out;
        } else {
-               ip_icmp_error(sk, skb, err, 0 /* no remote port */,
-                        info, (u8 *)icmph);
+               if (family == AF_INET) {
+                       ip_icmp_error(sk, skb, err, 0 /* no remote port */,
+                                     info, (u8 *)icmph);
+#if IS_ENABLED(CONFIG_IPV6)
+               } else if (family == AF_INET6) {
+                       pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
+                                                  info, (u8 *)icmph);
+#endif
+               }
        }
        sk->sk_err = err;
        sk->sk_error_report(sk);
 out:
        sock_put(sk);
 }
+EXPORT_SYMBOL_GPL(ping_err);
 
 /*
- *     Copy and checksum an ICMP Echo packet from user space into a buffer.
+ *     Copy and checksum an ICMP Echo packet from user space into a buffer
+ *     starting from the payload.
  */
 
-struct pingfakehdr {
-       struct icmphdr icmph;
-       struct iovec *iov;
-       __wsum wcheck;
-};
-
-static int ping_getfrag(void *from, char *to,
-                       int offset, int fraglen, int odd, struct sk_buff *skb)
+int ping_getfrag(void *from, char *to,
+                int offset, int fraglen, int odd, struct sk_buff *skb)
 {
        struct pingfakehdr *pfh = (struct pingfakehdr *)from;
 
@@ -425,20 +588,33 @@ static int ping_getfrag(void *from, char *to,
                            pfh->iov, 0, fraglen - sizeof(struct icmphdr),
                            &pfh->wcheck))
                        return -EFAULT;
+       } else if (offset < sizeof(struct icmphdr)) {
+                       BUG();
+       } else {
+               if (csum_partial_copy_fromiovecend
+                               (to, pfh->iov, offset - sizeof(struct icmphdr),
+                                fraglen, &pfh->wcheck))
+                       return -EFAULT;
+       }
 
-               return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+       /* For IPv6, checksum each skb as we go along, as expected by
+        * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in
+        * wcheck, it will be finalized in ping_v4_push_pending_frames.
+        */
+       if (pfh->family == AF_INET6) {
+               skb->csum = pfh->wcheck;
+               skb->ip_summed = CHECKSUM_NONE;
+               pfh->wcheck = 0;
        }
-       if (offset < sizeof(struct icmphdr))
-               BUG();
-       if (csum_partial_copy_fromiovecend
-                       (to, pfh->iov, offset - sizeof(struct icmphdr),
-                        fraglen, &pfh->wcheck))
-               return -EFAULT;
+#endif
+
        return 0;
 }
+EXPORT_SYMBOL_GPL(ping_getfrag);
 
-static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
-                                   struct flowi4 *fl4)
+static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+                                      struct flowi4 *fl4)
 {
        struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
@@ -450,24 +626,9 @@ static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
        return ip_push_pending_frames(sk, fl4);
 }
 
-static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                       size_t len)
-{
-       struct net *net = sock_net(sk);
-       struct flowi4 fl4;
-       struct inet_sock *inet = inet_sk(sk);
-       struct ipcm_cookie ipc;
-       struct icmphdr user_icmph;
-       struct pingfakehdr pfh;
-       struct rtable *rt = NULL;
-       struct ip_options_data opt_copy;
-       int free = 0;
-       __be32 saddr, daddr, faddr;
-       u8  tos;
-       int err;
-
-       pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
-
+int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+                       void *user_icmph, size_t icmph_len) {
+       u8 type, code;
 
        if (len > 0xFFFF)
                return -EMSGSIZE;
@@ -482,15 +643,53 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        /*
         *      Fetch the ICMP header provided by the userland.
-        *      iovec is modified!
+        *      iovec is modified! The ICMP header is consumed.
         */
-
-       if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
-                            sizeof(struct icmphdr)))
+       if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len))
                return -EFAULT;
-       if (!ping_supported(user_icmph.type, user_icmph.code))
+
+       if (family == AF_INET) {
+               type = ((struct icmphdr *) user_icmph)->type;
+               code = ((struct icmphdr *) user_icmph)->code;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (family == AF_INET6) {
+               type = ((struct icmp6hdr *) user_icmph)->icmp6_type;
+               code = ((struct icmp6hdr *) user_icmph)->icmp6_code;
+#endif
+       } else {
+               BUG();
+       }
+
+       if (!ping_supported(family, type, code))
                return -EINVAL;
 
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ping_common_sendmsg);
+
+int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                   size_t len)
+{
+       struct net *net = sock_net(sk);
+       struct flowi4 fl4;
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipcm_cookie ipc;
+       struct icmphdr user_icmph;
+       struct pingfakehdr pfh;
+       struct rtable *rt = NULL;
+       struct ip_options_data opt_copy;
+       int free = 0;
+       __be32 saddr, daddr, faddr;
+       u8  tos;
+       int err;
+
+       pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
+
+       err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph,
+                                 sizeof(user_icmph));
+       if (err)
+               return err;
+
        /*
         *      Get and verify the address.
         */
@@ -595,13 +794,14 @@ back_from_confirm:
        pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
        pfh.iov = msg->msg_iov;
        pfh.wcheck = 0;
+       pfh.family = AF_INET;
 
        err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
                        0, &ipc, &rt, msg->msg_flags);
        if (err)
                ip_flush_pending_frames(sk);
        else
-               err = ping_push_pending_frames(sk, &pfh, &fl4);
+               err = ping_v4_push_pending_frames(sk, &pfh, &fl4);
        release_sock(sk);
 
 out:
@@ -622,11 +822,13 @@ do_confirm:
        goto out;
 }
 
-static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                       size_t len, int noblock, int flags, int *addr_len)
+int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                size_t len, int noblock, int flags, int *addr_len)
 {
        struct inet_sock *isk = inet_sk(sk);
-       struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+       int family = sk->sk_family;
+       struct sockaddr_in *sin;
+       struct sockaddr_in6 *sin6;
        struct sk_buff *skb;
        int copied, err;
 
@@ -636,11 +838,22 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (flags & MSG_OOB)
                goto out;
 
-       if (addr_len)
-               *addr_len = sizeof(*sin);
+       if (addr_len) {
+               if (family == AF_INET)
+                       *addr_len = sizeof(*sin);
+               else if (family == AF_INET6 && addr_len)
+                       *addr_len = sizeof(*sin6);
+       }
 
-       if (flags & MSG_ERRQUEUE)
-               return ip_recv_error(sk, msg, len);
+       if (flags & MSG_ERRQUEUE) {
+               if (family == AF_INET) {
+                       return ip_recv_error(sk, msg, len);
+#if IS_ENABLED(CONFIG_IPV6)
+               } else if (family == AF_INET6) {
+                       return pingv6_ops.ipv6_recv_error(sk, msg, len);
+#endif
+               }
+       }
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
@@ -659,15 +872,40 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        sock_recv_timestamp(msg, sk, skb);
 
-       /* Copy the address. */
-       if (sin) {
+       /* Copy the address and add cmsg data. */
+       if (family == AF_INET) {
+               sin = (struct sockaddr_in *) msg->msg_name;
                sin->sin_family = AF_INET;
                sin->sin_port = 0 /* skb->h.uh->source */;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+
+               if (isk->cmsg_flags)
+                       ip_cmsg_recv(msg, skb);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               struct ipv6hdr *ip6 = ipv6_hdr(skb);
+               sin6 = (struct sockaddr_in6 *) msg->msg_name;
+               sin6->sin6_family = AF_INET6;
+               sin6->sin6_port = 0;
+               sin6->sin6_addr = ip6->saddr;
+
+               sin6->sin6_flowinfo = 0;
+               if (np->sndflow)
+                       sin6->sin6_flowinfo = ip6_flowinfo(ip6);
+
+               sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+                                                         IP6CB(skb)->iif);
+
+               if (inet6_sk(sk)->rxopt.all)
+                       pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
+#endif
+       } else {
+               BUG();
        }
-       if (isk->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+
        err = copied;
 
 done:
@@ -676,8 +914,9 @@ out:
        pr_debug("ping_recvmsg -> %d\n", err);
        return err;
 }
+EXPORT_SYMBOL_GPL(ping_recvmsg);
 
-static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
                 inet_sk(sk), inet_sk(sk)->inet_num, skb);
@@ -688,6 +927,7 @@ static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
 
 
 /*
@@ -698,10 +938,7 @@ void ping_rcv(struct sk_buff *skb)
 {
        struct sock *sk;
        struct net *net = dev_net(skb->dev);
-       struct iphdr *iph = ip_hdr(skb);
        struct icmphdr *icmph = icmp_hdr(skb);
-       __be32 saddr = iph->saddr;
-       __be32 daddr = iph->daddr;
 
        /* We assume the packet has already been checked by icmp_rcv */
 
@@ -711,8 +948,7 @@ void ping_rcv(struct sk_buff *skb)
        /* Push ICMP header back */
        skb_push(skb, skb->data - (u8 *)icmph);
 
-       sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
-                           skb->dev->ifindex);
+       sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk != NULL) {
                pr_debug("rcv on socket %p\n", sk);
                ping_queue_rcv_skb(sk, skb_get(skb));
@@ -723,6 +959,7 @@ void ping_rcv(struct sk_buff *skb)
 
        /* We're called from icmp_rcv(). kfree_skb() is done there. */
 }
+EXPORT_SYMBOL_GPL(ping_rcv);
 
 struct proto ping_prot = {
        .name =         "PING",
@@ -733,14 +970,14 @@ struct proto ping_prot = {
        .disconnect =   udp_disconnect,
        .setsockopt =   ip_setsockopt,
        .getsockopt =   ip_getsockopt,
-       .sendmsg =      ping_sendmsg,
+       .sendmsg =      ping_v4_sendmsg,
        .recvmsg =      ping_recvmsg,
        .bind =         ping_bind,
        .backlog_rcv =  ping_queue_rcv_skb,
        .release_cb =   ip4_datagram_release_cb,
-       .hash =         ping_v4_hash,
-       .unhash =       ping_v4_unhash,
-       .get_port =     ping_v4_get_port,
+       .hash =         ping_hash,
+       .unhash =       ping_unhash,
+       .get_port =     ping_get_port,
        .obj_size =     sizeof(struct inet_sock),
 };
 EXPORT_SYMBOL(ping_prot);
@@ -764,7 +1001,8 @@ static struct sock *ping_get_first(struct seq_file *seq, int start)
                        continue;
 
                sk_nulls_for_each(sk, node, hslot) {
-                       if (net_eq(sock_net(sk), net))
+                       if (net_eq(sock_net(sk), net) &&
+                           sk->sk_family == state->family)
                                goto found;
                }
        }
@@ -797,17 +1035,24 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
        return pos ? NULL : sk;
 }
 
-static void *ping_seq_start(struct seq_file *seq, loff_t *pos)
+void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
 {
        struct ping_iter_state *state = seq->private;
        state->bucket = 0;
+       state->family = family;
 
        read_lock_bh(&ping_table.lock);
 
        return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
 }
+EXPORT_SYMBOL_GPL(ping_seq_start);
+
+static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       return ping_seq_start(seq, pos, AF_INET);
+}
 
-static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct sock *sk;
 
@@ -819,13 +1064,15 @@ static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        ++*pos;
        return sk;
 }
+EXPORT_SYMBOL_GPL(ping_seq_next);
 
-static void ping_seq_stop(struct seq_file *seq, void *v)
+void ping_seq_stop(struct seq_file *seq, void *v)
 {
        read_unlock_bh(&ping_table.lock);
 }
+EXPORT_SYMBOL_GPL(ping_seq_stop);
 
-static void ping_format_sock(struct sock *sp, struct seq_file *f,
+static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
                int bucket, int *len)
 {
        struct inet_sock *inet = inet_sk(sp);
@@ -846,7 +1093,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
                atomic_read(&sp->sk_drops), len);
 }
 
-static int ping_seq_show(struct seq_file *seq, void *v)
+static int ping_v4_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
@@ -857,72 +1104,86 @@ static int ping_seq_show(struct seq_file *seq, void *v)
                struct ping_iter_state *state = seq->private;
                int len;
 
-               ping_format_sock(v, seq, state->bucket, &len);
+               ping_v4_format_sock(v, seq, state->bucket, &len);
                seq_printf(seq, "%*s\n", 127 - len, "");
        }
        return 0;
 }
 
-static const struct seq_operations ping_seq_ops = {
-       .show           = ping_seq_show,
-       .start          = ping_seq_start,
+static const struct seq_operations ping_v4_seq_ops = {
+       .show           = ping_v4_seq_show,
+       .start          = ping_v4_seq_start,
        .next           = ping_seq_next,
        .stop           = ping_seq_stop,
 };
 
 static int ping_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open_net(inode, file, &ping_seq_ops,
+       struct ping_seq_afinfo *afinfo = PDE_DATA(inode);
+       return seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct ping_iter_state));
 }
 
-static const struct file_operations ping_seq_fops = {
+const struct file_operations ping_seq_fops = {
        .open           = ping_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
 };
+EXPORT_SYMBOL_GPL(ping_seq_fops);
+
+static struct ping_seq_afinfo ping_v4_seq_afinfo = {
+       .name           = "icmp",
+       .family         = AF_INET,
+       .seq_fops       = &ping_seq_fops,
+       .seq_ops        = {
+               .start          = ping_v4_seq_start,
+               .show           = ping_v4_seq_show,
+               .next           = ping_seq_next,
+               .stop           = ping_seq_stop,
+       },
+};
 
-static int ping_proc_register(struct net *net)
+int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo)
 {
        struct proc_dir_entry *p;
-       int rc = 0;
-
-       p = proc_create("icmp", S_IRUGO, net->proc_net, &ping_seq_fops);
+       p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+                            afinfo->seq_fops, afinfo);
        if (!p)
-               rc = -ENOMEM;
-       return rc;
+               return -ENOMEM;
+       return 0;
 }
+EXPORT_SYMBOL_GPL(ping_proc_register);
 
-static void ping_proc_unregister(struct net *net)
+void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo)
 {
-       remove_proc_entry("icmp", net->proc_net);
+       remove_proc_entry(afinfo->name, net->proc_net);
 }
+EXPORT_SYMBOL_GPL(ping_proc_unregister);
 
-
-static int __net_init ping_proc_init_net(struct net *net)
+static int __net_init ping_v4_proc_init_net(struct net *net)
 {
-       return ping_proc_register(net);
+       return ping_proc_register(net, &ping_v4_seq_afinfo);
 }
 
-static void __net_exit ping_proc_exit_net(struct net *net)
+static void __net_exit ping_v4_proc_exit_net(struct net *net)
 {
-       ping_proc_unregister(net);
+       ping_proc_unregister(net, &ping_v4_seq_afinfo);
 }
 
-static struct pernet_operations ping_net_ops = {
-       .init = ping_proc_init_net,
-       .exit = ping_proc_exit_net,
+static struct pernet_operations ping_v4_net_ops = {
+       .init = ping_v4_proc_init_net,
+       .exit = ping_v4_proc_exit_net,
 };
 
 int __init ping_proc_init(void)
 {
-       return register_pernet_subsys(&ping_net_ops);
+       return register_pernet_subsys(&ping_v4_net_ops);
 }
 
 void ping_proc_exit(void)
 {
-       unregister_pernet_subsys(&ping_net_ops);
+       unregister_pernet_subsys(&ping_v4_net_ops);
 }
 
 #endif
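The ping.c hunks above turn the IPv4-only /proc/net/icmp seq_file code into shared infrastructure: the start/next/stop iterators, the file_operations and the register/unregister helpers are exported, and the address family is carried in a new struct ping_seq_afinfo so another family can reuse them. A minimal sketch of how a hypothetical IPv6 user could plug into these hooks follows; the v6 names and the "icmp6" proc entry are illustrative assumptions, only the exported symbols and the afinfo layout come from this diff.

	static void *ping_v6_seq_start(struct seq_file *seq, loff_t *pos)
	{
		return ping_seq_start(seq, pos, AF_INET6);
	}

	static int ping_v6_seq_show(struct seq_file *seq, void *v)
	{
		/* format one IPv6 ping socket per line here */
		return 0;
	}

	static struct ping_seq_afinfo ping_v6_seq_afinfo = {
		.name		= "icmp6",
		.family		= AF_INET6,
		.seq_fops	= &ping_seq_fops,
		.seq_ops	= {
			.start	= ping_v6_seq_start,
			.show	= ping_v6_seq_show,
			.next	= ping_seq_next,
			.stop	= ping_seq_stop,
		},
	};

	static int __net_init ping_v6_proc_init_net(struct net *net)
	{
		return ping_proc_register(net, &ping_v6_seq_afinfo);
	}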
index 2a5bf86d241518816bf753c76764f3cb8837233f..6577a1149a47c17853e8c7e0e3daa267ef16d1cd 100644 (file)
@@ -273,6 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
        SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
        SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
+       SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
        SNMP_MIB_SENTINEL
 };
 
index 550781a17b34f75f94b919a39120eb119db106a1..f3fa42eac461520393c3be14345e4a5f33485b2d 100644 (file)
@@ -594,11 +594,25 @@ static inline u32 fnhe_hashfun(__be32 daddr)
        return hval & (FNHE_HASH_SIZE - 1);
 }
 
+static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
+{
+       rt->rt_pmtu = fnhe->fnhe_pmtu;
+       rt->dst.expires = fnhe->fnhe_expires;
+
+       if (fnhe->fnhe_gw) {
+               rt->rt_flags |= RTCF_REDIRECTED;
+               rt->rt_gateway = fnhe->fnhe_gw;
+               rt->rt_uses_gateway = 1;
+       }
+}
+
 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                                  u32 pmtu, unsigned long expires)
 {
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe;
+       struct rtable *rt;
+       unsigned int i;
        int depth;
        u32 hval = fnhe_hashfun(daddr);
 
@@ -627,8 +641,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                        fnhe->fnhe_gw = gw;
                if (pmtu) {
                        fnhe->fnhe_pmtu = pmtu;
-                       fnhe->fnhe_expires = expires;
+                       fnhe->fnhe_expires = max(1UL, expires);
                }
+               /* Update all cached dsts too */
+               rt = rcu_dereference(fnhe->fnhe_rth);
+               if (rt)
+                       fill_route_from_fnhe(rt, fnhe);
        } else {
                if (depth > FNHE_RECLAIM_DEPTH)
                        fnhe = fnhe_oldest(hash);
@@ -640,10 +658,23 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                        fnhe->fnhe_next = hash->chain;
                        rcu_assign_pointer(hash->chain, fnhe);
                }
+               fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
                fnhe->fnhe_daddr = daddr;
                fnhe->fnhe_gw = gw;
                fnhe->fnhe_pmtu = pmtu;
                fnhe->fnhe_expires = expires;
+
+               /* Exception created; mark the cached routes for the nexthop
+                * stale, so anyone caching it rechecks if this exception
+                * applies to them.
+                */
+               for_each_possible_cpu(i) {
+                       struct rtable __rcu **prt;
+                       prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
+                       rt = rcu_dereference(*prt);
+                       if (rt)
+                               rt->dst.obsolete = DST_OBSOLETE_KILL;
+               }
        }
 
        fnhe->fnhe_stamp = jiffies;
@@ -737,10 +768,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
 {
        struct rtable *rt;
        struct flowi4 fl4;
+       const struct iphdr *iph = (const struct iphdr *) skb->data;
+       int oif = skb->dev->ifindex;
+       u8 tos = RT_TOS(iph->tos);
+       u8 prot = iph->protocol;
+       u32 mark = skb->mark;
 
        rt = (struct rtable *) dst;
 
-       ip_rt_build_flow_key(&fl4, sk, skb);
+       __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
        __ip_do_redirect(rt, skb, &fl4, true);
 }
 
@@ -917,12 +953,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
-       if (!rt->rt_pmtu) {
-               dst->obsolete = DST_OBSOLETE_KILL;
-       } else {
-               rt->rt_pmtu = mtu;
-               dst->expires = max(1UL, jiffies + ip_rt_mtu_expires);
-       }
+       if (rt->rt_pmtu == mtu &&
+           time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
+               return;
 
        rcu_read_lock();
        if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
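These route.c hunks rework how redirects and learned PMTUs are applied. ip_do_redirect() now builds its flow key directly from the packet's IP header via __build_flow_key(), and __ip_rt_update_pmtu() no longer either kills the cached dst or writes the new MTU straight into it: it returns early when the same MTU is already recorded and the entry is not close to expiring, and otherwise falls through to the fib_lookup()-based path (truncated in this hunk) that refreshes the next-hop exception, with the new fill_route_from_fnhe() helper propagating the exception's PMTU, expiry and gateway back into cached routes.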
@@ -1063,11 +1096,11 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         *
-        * When a PMTU/redirect information update invalidates a
-        * route, this is indicated by setting obsolete to
-        * DST_OBSOLETE_KILL.
+        * When a PMTU/redirect information update invalidates a route,
+        * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
+        * DST_OBSOLETE_DEAD by dst_free().
         */
-       if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
+       if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
                return NULL;
        return dst;
 }
@@ -1209,26 +1242,17 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
        spin_lock_bh(&fnhe_lock);
 
        if (daddr == fnhe->fnhe_daddr) {
+               int genid = fnhe_genid(dev_net(rt->dst.dev));
                struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
-               if (orig && rt_is_expired(orig)) {
+
+               if (fnhe->fnhe_genid != genid) {
+                       fnhe->fnhe_genid = genid;
                        fnhe->fnhe_gw = 0;
                        fnhe->fnhe_pmtu = 0;
                        fnhe->fnhe_expires = 0;
                }
-               if (fnhe->fnhe_pmtu) {
-                       unsigned long expires = fnhe->fnhe_expires;
-                       unsigned long diff = expires - jiffies;
-
-                       if (time_before(jiffies, expires)) {
-                               rt->rt_pmtu = fnhe->fnhe_pmtu;
-                               dst_set_expires(&rt->dst, diff);
-                       }
-               }
-               if (fnhe->fnhe_gw) {
-                       rt->rt_flags |= RTCF_REDIRECTED;
-                       rt->rt_gateway = fnhe->fnhe_gw;
-                       rt->rt_uses_gateway = 1;
-               } else if (!rt->rt_gateway)
+               fill_route_from_fnhe(rt, fnhe);
+               if (!rt->rt_gateway)
                        rt->rt_gateway = daddr;
 
                rcu_assign_pointer(fnhe->fnhe_rth, rt);
@@ -2424,19 +2448,22 @@ static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_gc_elasticity __read_mostly   = 8;
 
-static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
+static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
 {
+       struct net *net = (struct net *)__ctl->extra1;
+
        if (write) {
-               rt_cache_flush((struct net *)__ctl->extra1);
+               rt_cache_flush(net);
+               fnhe_genid_bump(net);
                return 0;
        }
 
        return -EINVAL;
 }
 
-static ctl_table ipv4_route_table[] = {
+static struct ctl_table ipv4_route_table[] = {
        {
                .procname       = "gc_thresh",
                .data           = &ipv4_dst_ops.gc_thresh,
@@ -2604,6 +2631,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
 static __net_init int rt_genid_init(struct net *net)
 {
        atomic_set(&net->rt_genid, 0);
+       atomic_set(&net->fnhe_genid, 0);
        get_random_bytes(&net->ipv4.dev_addr_genid,
                         sizeof(net->ipv4.dev_addr_genid));
        return 0;
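Together, the remaining route.c hunks add a per-namespace generation id for next-hop exceptions: rt_genid_init() now also zeroes net->fnhe_genid, the rtcache flush sysctl bumps it via fnhe_genid_bump(), newly created exceptions record the current value, and rt_bind_exception() clears a stale exception's gateway/PMTU when its recorded id no longer matches. A minimal sketch of what the two helpers are presumably doing, given that net->fnhe_genid is an atomic_t; the exact bodies are an assumption, only the call sites appear in this diff.

	static inline int fnhe_genid(struct net *net)
	{
		/* current generation for this namespace's exceptions */
		return atomic_read(&net->fnhe_genid);
	}

	static inline void fnhe_genid_bump(struct net *net)
	{
		/* invalidate every recorded fnhe_genid at once */
		atomic_inc(&net->fnhe_genid);
	}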
index fa2f63fc453b936b1660b79146b67edf17ae687a..b2c123c44d6947afe1f6d588808643c5db3be9b8 100644 (file)
@@ -49,13 +49,13 @@ static void set_local_port_range(int range[2])
 }
 
 /* Validate changes from /proc interface. */
-static int ipv4_local_port_range(ctl_table *table, int write,
+static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
        int ret;
        int range[2];
-       ctl_table tmp = {
+       struct ctl_table tmp = {
                .data = &range,
                .maxlen = sizeof(range),
                .mode = table->mode,
@@ -100,7 +100,7 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 }
 
 /* Validate changes from /proc interface. */
-static int ipv4_ping_group_range(ctl_table *table, int write,
+static int ipv4_ping_group_range(struct ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
@@ -108,7 +108,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
        int ret;
        gid_t urange[2];
        kgid_t low, high;
-       ctl_table tmp = {
+       struct ctl_table tmp = {
                .data = &urange,
                .maxlen = sizeof(urange),
                .mode = table->mode,
@@ -135,11 +135,11 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
        return ret;
 }
 
-static int proc_tcp_congestion_control(ctl_table *ctl, int write,
+static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
                                       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        char val[TCP_CA_NAME_MAX];
-       ctl_table tbl = {
+       struct ctl_table tbl = {
                .data = val,
                .maxlen = TCP_CA_NAME_MAX,
        };
@@ -153,12 +153,12 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
        return ret;
 }
 
-static int proc_tcp_available_congestion_control(ctl_table *ctl,
+static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
                                                 int write,
                                                 void __user *buffer, size_t *lenp,
                                                 loff_t *ppos)
 {
-       ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
+       struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
        int ret;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -170,12 +170,12 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
        return ret;
 }
 
-static int proc_allowed_congestion_control(ctl_table *ctl,
+static int proc_allowed_congestion_control(struct ctl_table *ctl,
                                           int write,
                                           void __user *buffer, size_t *lenp,
                                           loff_t *ppos)
 {
-       ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
+       struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
        int ret;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -190,7 +190,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
        return ret;
 }
 
-static int ipv4_tcp_mem(ctl_table *ctl, int write,
+static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp,
                           loff_t *ppos)
 {
@@ -201,7 +201,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
        struct mem_cgroup *memcg;
 #endif
 
-       ctl_table tmp = {
+       struct ctl_table tmp = {
                .data = &vec,
                .maxlen = sizeof(vec),
                .mode = ctl->mode,
@@ -233,10 +233,11 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
        return 0;
 }
 
-static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
-                                size_t *lenp, loff_t *ppos)
+static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
+                                void __user *buffer, size_t *lenp,
+                                loff_t *ppos)
 {
-       ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+       struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
        int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
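The ctl_table changes in this file, and in the route sysctl code earlier in the diff, are a mechanical cleanup: the bare ctl_table typedef is spelled out as struct ctl_table in the handlers and table definitions, with no change in behaviour.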
index dcb116dde2168759025d315f4e2d3b77994a276a..46ed9afd1f5e1c42219c9f0b1b3c2cf508e99b6b 100644 (file)
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <net/ll_poll.h>
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
@@ -436,6 +437,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
 
+       sock_rps_record_flow(sk);
+
        sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
@@ -1551,6 +1554,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct sk_buff *skb;
        u32 urg_hole = 0;
 
+       if (sk_valid_ll(sk) && skb_queue_empty(&sk->sk_receive_queue)
+           && (sk->sk_state == TCP_ESTABLISHED))
+               sk_poll_ll(sk, nonblock);
+
        lock_sock(sk);
 
        err = -ENOTCONN;
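These tcp.c hunks hook TCP into the new low-latency socket polling from <net/ll_poll.h>: tcp_poll() now records the flow for RFS steering, and tcp_recvmsg() busy-polls the device via sk_poll_ll() when the socket opts in, the receive queue is empty and the connection is ESTABLISHED, instead of sleeping immediately. This pairs with the LowLatencyRxPackets SNMP counter added earlier in this diff. The following hunk also drops the TSO/GRO helpers (tcp_tso_segment, tcp_gro_receive, tcp_gro_complete) from tcp.c; since their EXPORT_SYMBOLs are removed wholesale, they are presumably relocated to a dedicated offload file elsewhere in this merge rather than deleted outright.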
@@ -2875,229 +2882,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
-       netdev_features_t features)
-{
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
-       struct tcphdr *th;
-       unsigned int thlen;
-       unsigned int seq;
-       __be32 delta;
-       unsigned int oldlen;
-       unsigned int mss;
-       struct sk_buff *gso_skb = skb;
-       __sum16 newcheck;
-
-       if (!pskb_may_pull(skb, sizeof(*th)))
-               goto out;
-
-       th = tcp_hdr(skb);
-       thlen = th->doff * 4;
-       if (thlen < sizeof(*th))
-               goto out;
-
-       if (!pskb_may_pull(skb, thlen))
-               goto out;
-
-       oldlen = (u16)~skb->len;
-       __skb_pull(skb, thlen);
-
-       mss = skb_shinfo(skb)->gso_size;
-       if (unlikely(skb->len <= mss))
-               goto out;
-
-       if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
-               /* Packet is from an untrusted source, reset gso_segs. */
-               int type = skb_shinfo(skb)->gso_type;
-
-               if (unlikely(type &
-                            ~(SKB_GSO_TCPV4 |
-                              SKB_GSO_DODGY |
-                              SKB_GSO_TCP_ECN |
-                              SKB_GSO_TCPV6 |
-                              SKB_GSO_GRE |
-                              SKB_GSO_UDP_TUNNEL |
-                              0) ||
-                            !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
-                       goto out;
-
-               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
-
-               segs = NULL;
-               goto out;
-       }
-
-       segs = skb_segment(skb, features);
-       if (IS_ERR(segs))
-               goto out;
-
-       delta = htonl(oldlen + (thlen + mss));
-
-       skb = segs;
-       th = tcp_hdr(skb);
-       seq = ntohl(th->seq);
-
-       newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
-                                              (__force u32)delta));
-
-       do {
-               th->fin = th->psh = 0;
-               th->check = newcheck;
-
-               if (skb->ip_summed != CHECKSUM_PARTIAL)
-                       th->check =
-                            csum_fold(csum_partial(skb_transport_header(skb),
-                                                   thlen, skb->csum));
-
-               seq += mss;
-               skb = skb->next;
-               th = tcp_hdr(skb);
-
-               th->seq = htonl(seq);
-               th->cwr = 0;
-       } while (skb->next);
-
-       /* Following permits TCP Small Queues to work well with GSO :
-        * The callback to TCP stack will be called at the time last frag
-        * is freed at TX completion, and not right now when gso_skb
-        * is freed by GSO engine
-        */
-       if (gso_skb->destructor == tcp_wfree) {
-               swap(gso_skb->sk, skb->sk);
-               swap(gso_skb->destructor, skb->destructor);
-               swap(gso_skb->truesize, skb->truesize);
-       }
-
-       delta = htonl(oldlen + (skb->tail - skb->transport_header) +
-                     skb->data_len);
-       th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
-                               (__force u32)delta));
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               th->check = csum_fold(csum_partial(skb_transport_header(skb),
-                                                  thlen, skb->csum));
-
-out:
-       return segs;
-}
-EXPORT_SYMBOL(tcp_tso_segment);
-
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
-{
-       struct sk_buff **pp = NULL;
-       struct sk_buff *p;
-       struct tcphdr *th;
-       struct tcphdr *th2;
-       unsigned int len;
-       unsigned int thlen;
-       __be32 flags;
-       unsigned int mss = 1;
-       unsigned int hlen;
-       unsigned int off;
-       int flush = 1;
-       int i;
-
-       off = skb_gro_offset(skb);
-       hlen = off + sizeof(*th);
-       th = skb_gro_header_fast(skb, off);
-       if (skb_gro_header_hard(skb, hlen)) {
-               th = skb_gro_header_slow(skb, hlen, off);
-               if (unlikely(!th))
-                       goto out;
-       }
-
-       thlen = th->doff * 4;
-       if (thlen < sizeof(*th))
-               goto out;
-
-       hlen = off + thlen;
-       if (skb_gro_header_hard(skb, hlen)) {
-               th = skb_gro_header_slow(skb, hlen, off);
-               if (unlikely(!th))
-                       goto out;
-       }
-
-       skb_gro_pull(skb, thlen);
-
-       len = skb_gro_len(skb);
-       flags = tcp_flag_word(th);
-
-       for (; (p = *head); head = &p->next) {
-               if (!NAPI_GRO_CB(p)->same_flow)
-                       continue;
-
-               th2 = tcp_hdr(p);
-
-               if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
-                       NAPI_GRO_CB(p)->same_flow = 0;
-                       continue;
-               }
-
-               goto found;
-       }
-
-       goto out_check_final;
-
-found:
-       flush = NAPI_GRO_CB(p)->flush;
-       flush |= (__force int)(flags & TCP_FLAG_CWR);
-       flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
-                 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
-       flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
-       for (i = sizeof(*th); i < thlen; i += 4)
-               flush |= *(u32 *)((u8 *)th + i) ^
-                        *(u32 *)((u8 *)th2 + i);
-
-       mss = skb_shinfo(p)->gso_size;
-
-       flush |= (len - 1) >= mss;
-       flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
-
-       if (flush || skb_gro_receive(head, skb)) {
-               mss = 1;
-               goto out_check_final;
-       }
-
-       p = *head;
-       th2 = tcp_hdr(p);
-       tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
-
-out_check_final:
-       flush = len < mss;
-       flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
-                                       TCP_FLAG_RST | TCP_FLAG_SYN |
-                                       TCP_FLAG_FIN));
-
-       if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
-               pp = head;
-
-out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-
-       return pp;
-}
-EXPORT_SYMBOL(tcp_gro_receive);
-
-int tcp_gro_complete(struct sk_buff *skb)
-{
-       struct tcphdr *th = tcp_hdr(skb);
-
-       skb->csum_start = skb_transport_header(skb) - skb->head;
-       skb->csum_offset = offsetof(struct tcphdr, check);
-       skb->ip_summed = CHECKSUM_PARTIAL;
-
-       skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
-
-       if (th->cwr)
-               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
-       return 0;
-}
-EXPORT_SYMBOL(tcp_gro_complete);
-
 #ifdef CONFIG_TCP_MD5SIG
-static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
-static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_MUTEX(tcp_md5sig_mutex);
 
 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
 {
@@ -3112,30 +2899,14 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
        free_percpu(pool);
 }
 
-void tcp_free_md5sig_pool(void)
-{
-       struct tcp_md5sig_pool __percpu *pool = NULL;
-
-       spin_lock_bh(&tcp_md5sig_pool_lock);
-       if (--tcp_md5sig_users == 0) {
-               pool = tcp_md5sig_pool;
-               tcp_md5sig_pool = NULL;
-       }
-       spin_unlock_bh(&tcp_md5sig_pool_lock);
-       if (pool)
-               __tcp_free_md5sig_pool(pool);
-}
-EXPORT_SYMBOL(tcp_free_md5sig_pool);
-
-static struct tcp_md5sig_pool __percpu *
-__tcp_alloc_md5sig_pool(struct sock *sk)
+static void __tcp_alloc_md5sig_pool(void)
 {
        int cpu;
        struct tcp_md5sig_pool __percpu *pool;
 
        pool = alloc_percpu(struct tcp_md5sig_pool);
        if (!pool)
-               return NULL;
+               return;
 
        for_each_possible_cpu(cpu) {
                struct crypto_hash *hash;
@@ -3146,53 +2917,27 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
 
                per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
        }
-       return pool;
+       /* before setting tcp_md5sig_pool, we must commit all writes
+        * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+        */
+       smp_wmb();
+       tcp_md5sig_pool = pool;
+       return;
 out_free:
        __tcp_free_md5sig_pool(pool);
-       return NULL;
 }
 
-struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
+bool tcp_alloc_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool __percpu *pool;
-       bool alloc = false;
-
-retry:
-       spin_lock_bh(&tcp_md5sig_pool_lock);
-       pool = tcp_md5sig_pool;
-       if (tcp_md5sig_users++ == 0) {
-               alloc = true;
-               spin_unlock_bh(&tcp_md5sig_pool_lock);
-       } else if (!pool) {
-               tcp_md5sig_users--;
-               spin_unlock_bh(&tcp_md5sig_pool_lock);
-               cpu_relax();
-               goto retry;
-       } else
-               spin_unlock_bh(&tcp_md5sig_pool_lock);
-
-       if (alloc) {
-               /* we cannot hold spinlock here because this may sleep. */
-               struct tcp_md5sig_pool __percpu *p;
-
-               p = __tcp_alloc_md5sig_pool(sk);
-               spin_lock_bh(&tcp_md5sig_pool_lock);
-               if (!p) {
-                       tcp_md5sig_users--;
-                       spin_unlock_bh(&tcp_md5sig_pool_lock);
-                       return NULL;
-               }
-               pool = tcp_md5sig_pool;
-               if (pool) {
-                       /* oops, it has already been assigned. */
-                       spin_unlock_bh(&tcp_md5sig_pool_lock);
-                       __tcp_free_md5sig_pool(p);
-               } else {
-                       tcp_md5sig_pool = pool = p;
-                       spin_unlock_bh(&tcp_md5sig_pool_lock);
-               }
+       if (unlikely(!tcp_md5sig_pool)) {
+               mutex_lock(&tcp_md5sig_mutex);
+
+               if (!tcp_md5sig_pool)
+                       __tcp_alloc_md5sig_pool();
+
+               mutex_unlock(&tcp_md5sig_mutex);
        }
-       return pool;
+       return tcp_md5sig_pool != NULL;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
@@ -3209,28 +2954,15 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
        struct tcp_md5sig_pool __percpu *p;
 
        local_bh_disable();
-
-       spin_lock(&tcp_md5sig_pool_lock);
-       p = tcp_md5sig_pool;
-       if (p)
-               tcp_md5sig_users++;
-       spin_unlock(&tcp_md5sig_pool_lock);
-
+       p = ACCESS_ONCE(tcp_md5sig_pool);
        if (p)
-               return this_cpu_ptr(p);
+               return __this_cpu_ptr(p);
 
        local_bh_enable();
        return NULL;
 }
 EXPORT_SYMBOL(tcp_get_md5sig_pool);
 
-void tcp_put_md5sig_pool(void)
-{
-       local_bh_enable();
-       tcp_free_md5sig_pool();
-}
-EXPORT_SYMBOL(tcp_put_md5sig_pool);
-
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
                        const struct tcphdr *th)
 {
@@ -3269,8 +3001,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
        for (i = 0; i < shi->nr_frags; ++i) {
                const struct skb_frag_struct *f = &shi->frags[i];
-               struct page *page = skb_frag_page(f);
-               sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+               unsigned int offset = f->page_offset;
+               struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
+
+               sg_set_page(&sg, page, skb_frag_size(f),
+                           offset_in_page(offset));
                if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
                        return 1;
        }
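The MD5-pool hunks drop the old refcounted pool (tcp_md5sig_users plus a spinlock, freed when the last user went away) in favour of a pool that is allocated once and kept forever: the first caller serialises on tcp_md5sig_mutex, a smp_wmb() publishes the per-cpu contents before the pointer is set, and readers pair with ACCESS_ONCE() in tcp_get_md5sig_pool(); tcp_put_md5sig_pool() no longer needs to drop a refcount (its definition is removed here, presumably replaced by a trivial local_bh_enable() wrapper elsewhere). The final hunk also lets tcp_md5_hash_skb_data() cope with fragments whose page_offset exceeds PAGE_SIZE by stepping the page pointer forward (offset >> PAGE_SHIFT) and hashing from offset_in_page(offset). A minimal standalone sketch of the same allocate-once, publish-with-a-barrier pattern; the foo names are illustrative, not kernel symbols.

	struct foo {
		long scratch;
	};

	static struct foo __percpu *foo_pool __read_mostly;
	static DEFINE_MUTEX(foo_mutex);

	bool foo_alloc_pool(void)
	{
		if (unlikely(!foo_pool)) {
			mutex_lock(&foo_mutex);
			if (!foo_pool) {
				struct foo __percpu *p = alloc_percpu(struct foo);

				if (p) {
					/* commit the per-cpu contents before
					 * publishing the pointer to lockless
					 * readers */
					smp_wmb();
					foo_pool = p;
				}
			}
			mutex_unlock(&foo_mutex);
		}
		return foo_pool != NULL;
	}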
index 08bbe609652891f593ccc2f48ec9975bbe2f335d..28af45abe0622fabac4d53ab651099a580808766 100644 (file)
@@ -347,24 +347,13 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 }
 
 /* 3. Tuning rcvbuf, when connection enters established state. */
-
 static void tcp_fixup_rcvbuf(struct sock *sk)
 {
        u32 mss = tcp_sk(sk)->advmss;
-       u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
        int rcvmem;
 
-       /* Limit to 10 segments if mss <= 1460,
-        * or 14600/mss segments, with a minimum of two segments.
-        */
-       if (mss > 1460)
-               icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
-
-       rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
-       while (tcp_win_from_space(rcvmem) < mss)
-               rcvmem += 128;
-
-       rcvmem *= icwnd;
+       rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
+                tcp_default_init_rwnd(mss);
 
        if (sk->sk_rcvbuf < rcvmem)
                sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
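tcp_fixup_rcvbuf() now derives the initial receive buffer from the shared tcp_default_init_rwnd() helper instead of open-coding the 10-segment/14600-byte rule: rcvmem is sized as twice the default initial receive window's worth of skb truesize for the advertised MSS, rather than growing rcvmem until one MSS fits and multiplying by the initial window. For example, if tcp_default_init_rwnd(1460) is 10 segments, the buffer is provisioned for roughly 20 full-MSS skbs, still capped by sysctl_tcp_rmem[2].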
@@ -1257,8 +1246,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
        if (skb == tp->retransmit_skb_hint)
                tp->retransmit_skb_hint = prev;
-       if (skb == tp->scoreboard_skb_hint)
-               tp->scoreboard_skb_hint = prev;
        if (skb == tp->lost_skb_hint) {
                tp->lost_skb_hint = prev;
                tp->lost_cnt_hint -= tcp_skb_pcount(prev);
@@ -1966,20 +1953,6 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
        return true;
 }
 
-static inline int tcp_skb_timedout(const struct sock *sk,
-                                  const struct sk_buff *skb)
-{
-       return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
-}
-
-static inline int tcp_head_timedout(const struct sock *sk)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-
-       return tp->packets_out &&
-              tcp_skb_timedout(sk, tcp_write_queue_head(sk));
-}
-
 /* Linux NewReno/SACK/FACK/ECN state machine.
  * --------------------------------------
  *
@@ -2086,12 +2059,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
        if (tcp_dupack_heuristics(tp) > tp->reordering)
                return true;
 
-       /* Trick#3 : when we use RFC2988 timer restart, fast
-        * retransmit can be triggered by timeout of queue head.
-        */
-       if (tcp_is_fack(tp) && tcp_head_timedout(sk))
-               return true;
-
        /* Trick#4: It is still not OK... But will it be useful to delay
         * recovery more?
         */
@@ -2128,44 +2095,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
        return false;
 }
 
-/* New heuristics: it is possible only after we switched to restart timer
- * each time when something is ACKed. Hence, we can detect timed out packets
- * during fast retransmit without falling to slow start.
- *
- * Usefulness of this as is very questionable, since we should know which of
- * the segments is the next to timeout which is relatively expensive to find
- * in general case unless we add some data structure just for that. The
- * current approach certainly won't find the right one too often and when it
- * finally does find _something_ it usually marks large part of the window
- * right away (because a retransmission with a larger timestamp blocks the
- * loop from advancing). -ij
- */
-static void tcp_timeout_skbs(struct sock *sk)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb;
-
-       if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
-               return;
-
-       skb = tp->scoreboard_skb_hint;
-       if (tp->scoreboard_skb_hint == NULL)
-               skb = tcp_write_queue_head(sk);
-
-       tcp_for_write_queue_from(skb, sk) {
-               if (skb == tcp_send_head(sk))
-                       break;
-               if (!tcp_skb_timedout(sk, skb))
-                       break;
-
-               tcp_skb_mark_lost(tp, skb);
-       }
-
-       tp->scoreboard_skb_hint = skb;
-
-       tcp_verify_left_out(tp);
-}
-
 /* Detect loss in event "A" above by marking head of queue up as lost.
  * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
  * are considered lost. For RFC3517 SACK, a segment is considered lost if it
@@ -2251,8 +2180,6 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
                else if (fast_rexmit)
                        tcp_mark_head_lost(sk, 1, 1);
        }
-
-       tcp_timeout_skbs(sk);
 }
 
 /* CWND moderation, preventing bursts due to too big ACKs
@@ -2307,10 +2234,22 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
+static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
+       if (unmark_loss) {
+               struct sk_buff *skb;
+
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
+                       TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+               }
+               tp->lost_out = 0;
+               tcp_clear_all_retrans_hints(tp);
+       }
+
        if (tp->prior_ssthresh) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -2319,7 +2258,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
                else
                        tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
-               if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
+               if (tp->prior_ssthresh > tp->snd_ssthresh) {
                        tp->snd_ssthresh = tp->prior_ssthresh;
                        TCP_ECN_withdraw_cwr(tp);
                }
@@ -2327,6 +2266,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
        }
        tp->snd_cwnd_stamp = tcp_time_stamp;
+       tp->undo_marker = 0;
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2346,14 +2286,13 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                 * or our original transmission succeeded.
                 */
                DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-               tcp_undo_cwr(sk, true);
+               tcp_undo_cwnd_reduction(sk, false);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                        mib_idx = LINUX_MIB_TCPLOSSUNDO;
                else
                        mib_idx = LINUX_MIB_TCPFULLUNDO;
 
                NET_INC_STATS_BH(sock_net(sk), mib_idx);
-               tp->undo_marker = 0;
        }
        if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
                /* Hold old state until something *above* high_seq
@@ -2367,16 +2306,17 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk)
+static bool tcp_try_undo_dsack(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tp->undo_marker && !tp->undo_retrans) {
                DBGUNDO(sk, "D-SACK");
-               tcp_undo_cwr(sk, true);
-               tp->undo_marker = 0;
+               tcp_undo_cwnd_reduction(sk, false);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+               return true;
        }
+       return false;
 }
 
 /* We can clear retrans_stamp when there are no retransmissions in the
@@ -2408,60 +2348,20 @@ static bool tcp_any_retrans_done(const struct sock *sk)
        return false;
 }
 
-/* Undo during fast recovery after partial ACK. */
-
-static int tcp_try_undo_partial(struct sock *sk, int acked)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       /* Partial ACK arrived. Force Hoe's retransmit. */
-       int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
-
-       if (tcp_may_undo(tp)) {
-               /* Plain luck! Hole if filled with delayed
-                * packet, rather than with a retransmit.
-                */
-               if (!tcp_any_retrans_done(sk))
-                       tp->retrans_stamp = 0;
-
-               tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
-
-               DBGUNDO(sk, "Hoe");
-               tcp_undo_cwr(sk, false);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
-
-               /* So... Do not make Hoe's retransmit yet.
-                * If the first packet was delayed, the rest
-                * ones are most probably delayed as well.
-                */
-               failed = 0;
-       }
-       return failed;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (frto_undo || tcp_may_undo(tp)) {
-               struct sk_buff *skb;
-               tcp_for_write_queue(skb, sk) {
-                       if (skb == tcp_send_head(sk))
-                               break;
-                       TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
-               }
-
-               tcp_clear_all_retrans_hints(tp);
+               tcp_undo_cwnd_reduction(sk, true);
 
                DBGUNDO(sk, "partial loss");
-               tp->lost_out = 0;
-               tcp_undo_cwr(sk, true);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                if (frto_undo)
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPSPURIOUSRTOS);
                inet_csk(sk)->icsk_retransmits = 0;
-               tp->undo_marker = 0;
                if (frto_undo || tcp_is_sack(tp))
                        tcp_set_ca_state(sk, TCP_CA_Open);
                return true;
@@ -2494,12 +2394,14 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
        TCP_ECN_queue_cwr(tp);
 }
 
-static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
                               int fast_rexmit)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
        int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+       int newly_acked_sacked = prior_unsacked -
+                                (tp->packets_out - tp->sacked_out);
 
        tp->prr_delivered += newly_acked_sacked;
        if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
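prior_unsacked is sampled in tcp_ack() as packets_out - sacked_out before the ACK is processed, so tcp_cwnd_reduction() can recover the per-ACK delivery count locally: newly_acked_sacked = prior_unsacked - (packets_out - sacked_out) after processing. Worked example: with 10 packets outstanding and 2 SACKed before the ACK (prior_unsacked = 8), an ACK that leaves 7 outstanding with 2 still SACKed (unsacked = 5) yields newly_acked_sacked = 8 - 5 = 3 newly delivered segments feeding tp->prr_delivered.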
@@ -2556,7 +2458,7 @@ static void tcp_try_keep_open(struct sock *sk)
        }
 }
 
-static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
+static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2573,7 +2475,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
                        tcp_moderate_cwnd(tp);
        } else {
-               tcp_cwnd_reduction(sk, newly_acked_sacked, 0);
+               tcp_cwnd_reduction(sk, prior_unsacked, 0);
        }
 }
 
@@ -2731,6 +2633,40 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
        tcp_xmit_retransmit_queue(sk);
 }
 
+/* Undo during fast recovery after partial ACK. */
+static bool tcp_try_undo_partial(struct sock *sk, const int acked,
+                                const int prior_unsacked)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (tp->undo_marker && tcp_packet_delayed(tp)) {
+               /* Plain luck! Hole is filled with a delayed
+                * packet, rather than with a retransmit.
+                */
+               tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
+
+               /* We are getting evidence that the reordering degree is higher
+                * than we realized. If there are no retransmits out then we
+                * can undo. Otherwise we clock out new packets but do not
+                * mark more packets lost or retransmit more.
+                */
+               if (tp->retrans_out) {
+                       tcp_cwnd_reduction(sk, prior_unsacked, 0);
+                       return true;
+               }
+
+               if (!tcp_any_retrans_done(sk))
+                       tp->retrans_stamp = 0;
+
+               DBGUNDO(sk, "partial recovery");
+               tcp_undo_cwnd_reduction(sk, true);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+               tcp_try_keep_open(sk);
+               return true;
+       }
+       return false;
+}
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -2742,15 +2678,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int prior_sacked, bool is_dupack,
-                                 int flag)
+static void tcp_fastretrans_alert(struct sock *sk, const int acked,
+                                 const int prior_unsacked,
+                                 bool is_dupack, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
+       bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
-       int newly_acked_sacked = 0;
        int fast_rexmit = 0;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2802,9 +2737,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                if (!(flag & FLAG_SND_UNA_ADVANCED)) {
                        if (tcp_is_reno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
-               } else
-                       do_lost = tcp_try_undo_partial(sk, pkts_acked);
-               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+               } else {
+                       if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+                               return;
+                       /* Partial ACK arrived. Force fast retransmit. */
+                       do_lost = tcp_is_reno(tp) ||
+                                 tcp_fackets_out(tp) > tp->reordering;
+               }
+               if (tcp_try_undo_dsack(sk)) {
+                       tcp_try_keep_open(sk);
+                       return;
+               }
                break;
        case TCP_CA_Loss:
                tcp_process_loss(sk, flag, is_dupack);
@@ -2818,13 +2761,12 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        if (is_dupack)
                                tcp_add_reno_sack(sk);
                }
-               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk, flag)) {
-                       tcp_try_to_open(sk, flag, newly_acked_sacked);
+                       tcp_try_to_open(sk, flag, prior_unsacked);
                        return;
                }
 
@@ -2844,9 +2786,9 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                fast_rexmit = 1;
        }
 
-       if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
+       if (do_lost)
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit);
+       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
        tcp_xmit_retransmit_queue(sk);
 }
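The rewritten tcp_try_undo_partial() replaces the old Hoe-style partial undo: on a partial ACK that looks like reordering (undo_marker set and tcp_packet_delayed()), the sender keeps running PRR cwnd reduction while retransmissions are still outstanding, and only fully undoes the reduction via tcp_undo_cwnd_reduction(), keeping the connection open with tcp_try_keep_open(), once no retransmits remain. tcp_fastretrans_alert() accordingly returns early after a successful partial or D-SACK undo and no longer tracks newly_acked_sacked itself, passing prior_unsacked down to tcp_cwnd_reduction() instead.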
 
@@ -3077,7 +3019,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                tcp_unlink_write_queue(skb, sk);
                sk_wmem_free_skb(sk, skb);
-               tp->scoreboard_skb_hint = NULL;
                if (skb == tp->retransmit_skb_hint)
                        tp->retransmit_skb_hint = NULL;
                if (skb == tp->lost_skb_hint)
@@ -3330,9 +3271,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        bool is_dupack = false;
        u32 prior_in_flight;
        u32 prior_fackets;
-       int prior_packets;
-       int prior_sacked = tp->sacked_out;
-       int pkts_acked = 0;
+       int prior_packets = tp->packets_out;
+       const int prior_unsacked = tp->packets_out - tp->sacked_out;
+       int acked = 0; /* Number of packets newly acked */
 
        /* If the ack is older than previous acks
         * then we can probably ignore it.
@@ -3403,21 +3344,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        sk->sk_err_soft = 0;
        icsk->icsk_probes_out = 0;
        tp->rcv_tstamp = tcp_time_stamp;
-       prior_packets = tp->packets_out;
        if (!prior_packets)
                goto no_queue;
 
        /* See if we can take anything off of the retransmit queue. */
+       acked = tp->packets_out;
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
-
-       pkts_acked = prior_packets - tp->packets_out;
+       acked -= tp->packets_out;
 
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
                if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
+               tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        } else {
                if (flag & FLAG_DATA_ACKED)
@@ -3440,7 +3380,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
+               tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
@@ -3463,7 +3403,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
+               tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        }
 
@@ -3777,6 +3717,7 @@ void tcp_reset(struct sock *sk)
 static void tcp_fin(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct dst_entry *dst;
 
        inet_csk_schedule_ack(sk);
 
@@ -3788,7 +3729,9 @@ static void tcp_fin(struct sock *sk)
        case TCP_ESTABLISHED:
                /* Move to CLOSE_WAIT */
                tcp_set_state(sk, TCP_CLOSE_WAIT);
-               inet_csk(sk)->icsk_ack.pingpong = 1;
+               dst = __sk_dst_get(sk);
+               if (!dst || !dst_metric(dst, RTAX_QUICKACK))
+                       inet_csk(sk)->icsk_ack.pingpong = 1;
                break;
 
        case TCP_CLOSE_WAIT:
@@ -5598,6 +5541,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock *req;
        int queued = 0;
+       bool acceptable;
 
        tp->rx_opt.saw_tstamp = 0;
 
@@ -5668,157 +5612,147 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                return 0;
 
        /* step 5: check the ACK field */
-       if (true) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
-                                                 FLAG_UPDATE_TS_RECENT) > 0;
-
-               switch (sk->sk_state) {
-               case TCP_SYN_RECV:
-                       if (acceptable) {
-                               /* Once we leave TCP_SYN_RECV, we no longer
-                                * need req so release it.
-                                */
-                               if (req) {
-                                       tcp_synack_rtt_meas(sk, req);
-                                       tp->total_retrans = req->num_retrans;
-
-                                       reqsk_fastopen_remove(sk, req, false);
-                               } else {
-                                       /* Make sure socket is routed, for
-                                        * correct metrics.
-                                        */
-                                       icsk->icsk_af_ops->rebuild_header(sk);
-                                       tcp_init_congestion_control(sk);
+       acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+                                     FLAG_UPDATE_TS_RECENT) > 0;
 
-                                       tcp_mtup_init(sk);
-                                       tcp_init_buffer_space(sk);
-                                       tp->copied_seq = tp->rcv_nxt;
-                               }
-                               smp_mb();
-                               tcp_set_state(sk, TCP_ESTABLISHED);
-                               sk->sk_state_change(sk);
-
-                               /* Note, that this wakeup is only for marginal
-                                * crossed SYN case. Passively open sockets
-                                * are not waked up, because sk->sk_sleep ==
-                                * NULL and sk->sk_socket == NULL.
-                                */
-                               if (sk->sk_socket)
-                                       sk_wake_async(sk,
-                                                     SOCK_WAKE_IO, POLL_OUT);
-
-                               tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-                               tp->snd_wnd = ntohs(th->window) <<
-                                             tp->rx_opt.snd_wscale;
-                               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-                               if (tp->rx_opt.tstamp_ok)
-                                       tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-                               if (req) {
-                                       /* Re-arm the timer because data may
-                                        * have been sent out. This is similar
-                                        * to the regular data transmission case
-                                        * when new data has just been ack'ed.
-                                        *
-                                        * (TFO) - we could try to be more
-                                        * aggressive and retranmitting any data
-                                        * sooner based on when they were sent
-                                        * out.
-                                        */
-                                       tcp_rearm_rto(sk);
-                               } else
-                                       tcp_init_metrics(sk);
+       switch (sk->sk_state) {
+       case TCP_SYN_RECV:
+               if (!acceptable)
+                       return 1;
 
-                               /* Prevent spurious tcp_cwnd_restart() on
-                                * first data packet.
-                                */
-                               tp->lsndtime = tcp_time_stamp;
+               /* Once we leave TCP_SYN_RECV, we no longer need req
+                * so release it.
+                */
+               if (req) {
+                       tcp_synack_rtt_meas(sk, req);
+                       tp->total_retrans = req->num_retrans;
 
-                               tcp_initialize_rcv_mss(sk);
-                               tcp_fast_path_on(tp);
-                       } else {
-                               return 1;
-                       }
-                       break;
+                       reqsk_fastopen_remove(sk, req, false);
+               } else {
+                       /* Make sure socket is routed, for correct metrics. */
+                       icsk->icsk_af_ops->rebuild_header(sk);
+                       tcp_init_congestion_control(sk);
+
+                       tcp_mtup_init(sk);
+                       tcp_init_buffer_space(sk);
+                       tp->copied_seq = tp->rcv_nxt;
+               }
+               smp_mb();
+               tcp_set_state(sk, TCP_ESTABLISHED);
+               sk->sk_state_change(sk);
+
+               /* Note, that this wakeup is only for marginal crossed SYN case.
+                * Passively open sockets are not woken up, because
+                * sk->sk_sleep == NULL and sk->sk_socket == NULL.
+                */
+               if (sk->sk_socket)
+                       sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+               tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+               tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
-               case TCP_FIN_WAIT1:
-                       /* If we enter the TCP_FIN_WAIT1 state and we are a
-                        * Fast Open socket and this is the first acceptable
-                        * ACK we have received, this would have acknowledged
-                        * our SYNACK so stop the SYNACK timer.
+               if (tp->rx_opt.tstamp_ok)
+                       tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+               if (req) {
+                       /* Re-arm the timer because data may have been sent out.
+                        * This is similar to the regular data transmission case
+                        * when new data has just been ack'ed.
+                        *
+                        * (TFO) - we could try to be more aggressive and
+                        * retransmitting any data sooner based on when they
+                        * are sent out.
                         */
-                       if (req != NULL) {
-                               /* Return RST if ack_seq is invalid.
-                                * Note that RFC793 only says to generate a
-                                * DUPACK for it but for TCP Fast Open it seems
-                                * better to treat this case like TCP_SYN_RECV
-                                * above.
-                                */
-                               if (!acceptable)
-                                       return 1;
-                               /* We no longer need the request sock. */
-                               reqsk_fastopen_remove(sk, req, false);
-                               tcp_rearm_rto(sk);
-                       }
-                       if (tp->snd_una == tp->write_seq) {
-                               struct dst_entry *dst;
-
-                               tcp_set_state(sk, TCP_FIN_WAIT2);
-                               sk->sk_shutdown |= SEND_SHUTDOWN;
-
-                               dst = __sk_dst_get(sk);
-                               if (dst)
-                                       dst_confirm(dst);
-
-                               if (!sock_flag(sk, SOCK_DEAD))
-                                       /* Wake up lingering close() */
-                                       sk->sk_state_change(sk);
-                               else {
-                                       int tmo;
-
-                                       if (tp->linger2 < 0 ||
-                                           (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-                                            after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-                                               tcp_done(sk);
-                                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-                                               return 1;
-                                       }
+                       tcp_rearm_rto(sk);
+               } else
+                       tcp_init_metrics(sk);
 
-                                       tmo = tcp_fin_time(sk);
-                                       if (tmo > TCP_TIMEWAIT_LEN) {
-                                               inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-                                       } else if (th->fin || sock_owned_by_user(sk)) {
-                                               /* Bad case. We could lose such FIN otherwise.
-                                                * It is not a big problem, but it looks confusing
-                                                * and not so rare event. We still can lose it now,
-                                                * if it spins in bh_lock_sock(), but it is really
-                                                * marginal case.
-                                                */
-                                               inet_csk_reset_keepalive_timer(sk, tmo);
-                                       } else {
-                                               tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-                                               goto discard;
-                                       }
-                               }
-                       }
-                       break;
+               /* Prevent spurious tcp_cwnd_restart() on first data packet */
+               tp->lsndtime = tcp_time_stamp;
 
-               case TCP_CLOSING:
-                       if (tp->snd_una == tp->write_seq) {
-                               tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-                               goto discard;
-                       }
+               tcp_initialize_rcv_mss(sk);
+               tcp_fast_path_on(tp);
+               break;
+
+       case TCP_FIN_WAIT1: {
+               struct dst_entry *dst;
+               int tmo;
+
+               /* If we enter the TCP_FIN_WAIT1 state and we are a
+                * Fast Open socket and this is the first acceptable
+                * ACK we have received, this would have acknowledged
+                * our SYNACK so stop the SYNACK timer.
+                */
+               if (req != NULL) {
+                       /* Return RST if ack_seq is invalid.
+                        * Note that RFC793 only says to generate a
+                        * DUPACK for it but for TCP Fast Open it seems
+                        * better to treat this case like TCP_SYN_RECV
+                        * above.
+                        */
+                       if (!acceptable)
+                               return 1;
+                       /* We no longer need the request sock. */
+                       reqsk_fastopen_remove(sk, req, false);
+                       tcp_rearm_rto(sk);
+               }
+               if (tp->snd_una != tp->write_seq)
                        break;
 
-               case TCP_LAST_ACK:
-                       if (tp->snd_una == tp->write_seq) {
-                               tcp_update_metrics(sk);
-                               tcp_done(sk);
-                               goto discard;
-                       }
+               tcp_set_state(sk, TCP_FIN_WAIT2);
+               sk->sk_shutdown |= SEND_SHUTDOWN;
+
+               dst = __sk_dst_get(sk);
+               if (dst)
+                       dst_confirm(dst);
+
+               if (!sock_flag(sk, SOCK_DEAD)) {
+                       /* Wake up lingering close() */
+                       sk->sk_state_change(sk);
                        break;
                }
+
+               if (tp->linger2 < 0 ||
+                   (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+                    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+                       tcp_done(sk);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+                       return 1;
+               }
+
+               tmo = tcp_fin_time(sk);
+               if (tmo > TCP_TIMEWAIT_LEN) {
+                       inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+               } else if (th->fin || sock_owned_by_user(sk)) {
+                       /* Bad case. We could lose such a FIN otherwise.
+                        * It is not a big problem, but it looks confusing
+                        * and is not such a rare event. We can still lose it
+                        * now, if it spins in bh_lock_sock(), but that is
+                        * really a marginal case.
+                        */
+                       inet_csk_reset_keepalive_timer(sk, tmo);
+               } else {
+                       tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+                       goto discard;
+               }
+               break;
+       }
+
+       case TCP_CLOSING:
+               if (tp->snd_una == tp->write_seq) {
+                       tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+                       goto discard;
+               }
+               break;
+
+       case TCP_LAST_ACK:
+               if (tp->snd_una == tp->write_seq) {
+                       tcp_update_metrics(sk);
+                       tcp_done(sk);
+                       goto discard;
+               }
+               break;
        }
 
        /* step 6: check the URG bit */
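The reworked TCP_FIN_WAIT1 branch above boils down to one timeout decision once the ACK covers write_seq: keep the socket in FIN_WAIT2 under the keepalive timer, or hand it to the timewait machinery. Below is a minimal standalone sketch of just that decision, using stand-in values for tcp_fin_time() and TCP_TIMEWAIT_LEN; it is illustrative only, not kernel code.

/* Standalone sketch of the FIN_WAIT2 timeout decision shown in the hunk
 * above; constants and inputs are stand-ins, nothing here is kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_TIMEWAIT_LEN 60000	/* assumed 60s, in milliseconds */

static const char *fin_wait2_action(int tmo, bool fin, bool sock_owned)
{
	if (tmo > SKETCH_TIMEWAIT_LEN)
		return "keepalive timer for tmo - TIMEWAIT_LEN, stay in FIN_WAIT2";
	if (fin || sock_owned)
		return "keepalive timer for tmo, so the FIN is not lost";
	return "tcp_time_wait(FIN_WAIT2, tmo) and discard";
}

int main(void)
{
	printf("long linger : %s\n", fin_wait2_action(120000, false, false));
	printf("fin present : %s\n", fin_wait2_action(30000, true, false));
	printf("default     : %s\n", fin_wait2_action(30000, false, false));
	return 0;
}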
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 719652305a2950a3407804136bf7f42dc1d18e15..1063bb83e34285697a1c68e8adc28b71660b8fcf 100644 (file)
@@ -75,6 +75,7 @@
 #include <net/netdma.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
+#include <net/ll_poll.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -545,8 +546,7 @@ out:
        sock_put(sk);
 }
 
-static void __tcp_v4_send_check(struct sk_buff *skb,
-                               __be32 saddr, __be32 daddr)
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
        struct tcphdr *th = tcp_hdr(skb);
 
@@ -571,23 +571,6 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_v4_send_check);
 
-int tcp_v4_gso_send_check(struct sk_buff *skb)
-{
-       const struct iphdr *iph;
-       struct tcphdr *th;
-
-       if (!pskb_may_pull(skb, sizeof(*th)))
-               return -EINVAL;
-
-       iph = ip_hdr(skb);
-       th = tcp_hdr(skb);
-
-       th->check = 0;
-       skb->ip_summed = CHECKSUM_PARTIAL;
-       __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
-       return 0;
-}
-
 /*
  *     This routine will send an RST to the other tcp.
  *
@@ -1026,7 +1009,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        key = sock_kmalloc(sk, sizeof(*key), gfp);
        if (!key)
                return -ENOMEM;
-       if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
+       if (!tcp_alloc_md5sig_pool()) {
                sock_kfree_s(sk, key, sizeof(*key));
                return -ENOMEM;
        }
@@ -1044,9 +1027,7 @@ EXPORT_SYMBOL(tcp_md5_do_add);
 
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
-       struct tcp_md5sig_info *md5sig;
 
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
        if (!key)
@@ -1054,10 +1035,6 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
        hlist_del_rcu(&key->node);
        atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
        kfree_rcu(key, rcu);
-       md5sig = rcu_dereference_protected(tp->md5sig_info,
-                                          sock_owned_by_user(sk));
-       if (hlist_empty(&md5sig->head))
-               tcp_free_md5sig_pool();
        return 0;
 }
 EXPORT_SYMBOL(tcp_md5_do_del);
@@ -1071,8 +1048,6 @@ static void tcp_clear_md5_list(struct sock *sk)
 
        md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 
-       if (!hlist_empty(&md5sig->head))
-               tcp_free_md5sig_pool();
        hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
                hlist_del_rcu(&key->node);
                atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
@@ -2019,6 +1994,7 @@ process:
        if (sk_filter(sk, skb))
                goto discard_and_relse;
 
+       sk_mark_ll(sk, skb);
        skb->dev = NULL;
 
        bh_lock_sock_nested(sk);
@@ -2803,52 +2779,6 @@ void tcp4_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
-{
-       const struct iphdr *iph = skb_gro_network_header(skb);
-       __wsum wsum;
-       __sum16 sum;
-
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
-                                 skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       break;
-               }
-flush:
-               NAPI_GRO_CB(skb)->flush = 1;
-               return NULL;
-
-       case CHECKSUM_NONE:
-               wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                         skb_gro_len(skb), IPPROTO_TCP, 0);
-               sum = csum_fold(skb_checksum(skb,
-                                            skb_gro_offset(skb),
-                                            skb_gro_len(skb),
-                                            wsum));
-               if (sum)
-                       goto flush;
-
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-               break;
-       }
-
-       return tcp_gro_receive(head, skb);
-}
-
-int tcp4_gro_complete(struct sk_buff *skb)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-       struct tcphdr *th = tcp_hdr(skb);
-
-       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
-                                 iph->saddr, iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-
-       return tcp_gro_complete(skb);
-}
-
 struct proto tcp_prot = {
        .name                   = "TCP",
        .owner                  = THIS_MODULE,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0f01788272595397cdc8c440f89d595bb6bed0dc..ab1c08658528e7737fd1683841a4d11701cbc0bb 100644 (file)
@@ -317,7 +317,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        key = tp->af_specific->md5_lookup(sk, sk);
                        if (key != NULL) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
-                               if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
+                               if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
                                        BUG();
                        }
                } while (0);
@@ -358,10 +358,8 @@ void tcp_twsk_destructor(struct sock *sk)
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_timewait_sock *twsk = tcp_twsk(sk);
 
-       if (twsk->tw_md5_key) {
-               tcp_free_md5sig_pool();
+       if (twsk->tw_md5_key)
                kfree_rcu(twsk->tw_md5_key, rcu);
-       }
 #endif
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
new file mode 100644 (file)
index 0000000..3a7525e
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ *     IPV4 GSO/GRO offload support
+ *     Linux INET implementation
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ *     TCPv4 GSO/GRO support
+ */
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+#include <net/protocol.h>
+
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+                               netdev_features_t features)
+{
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       struct tcphdr *th;
+       unsigned int thlen;
+       unsigned int seq;
+       __be32 delta;
+       unsigned int oldlen;
+       unsigned int mss;
+       struct sk_buff *gso_skb = skb;
+       __sum16 newcheck;
+       bool ooo_okay, copy_destructor;
+
+       if (!pskb_may_pull(skb, sizeof(*th)))
+               goto out;
+
+       th = tcp_hdr(skb);
+       thlen = th->doff * 4;
+       if (thlen < sizeof(*th))
+               goto out;
+
+       if (!pskb_may_pull(skb, thlen))
+               goto out;
+
+       oldlen = (u16)~skb->len;
+       __skb_pull(skb, thlen);
+
+       mss = tcp_skb_mss(skb);
+       if (unlikely(skb->len <= mss))
+               goto out;
+
+       if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+               /* Packet is from an untrusted source, reset gso_segs. */
+               int type = skb_shinfo(skb)->gso_type;
+
+               if (unlikely(type &
+                            ~(SKB_GSO_TCPV4 |
+                              SKB_GSO_DODGY |
+                              SKB_GSO_TCP_ECN |
+                              SKB_GSO_TCPV6 |
+                              SKB_GSO_GRE |
+                              SKB_GSO_MPLS |
+                              SKB_GSO_UDP_TUNNEL |
+                              0) ||
+                            !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
+                       goto out;
+
+               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
+
+               segs = NULL;
+               goto out;
+       }
+
+       copy_destructor = gso_skb->destructor == tcp_wfree;
+       ooo_okay = gso_skb->ooo_okay;
+       /* All segments but the first should have ooo_okay cleared */
+       skb->ooo_okay = 0;
+
+       segs = skb_segment(skb, features);
+       if (IS_ERR(segs))
+               goto out;
+
+       /* Only first segment might have ooo_okay set */
+       segs->ooo_okay = ooo_okay;
+
+       delta = htonl(oldlen + (thlen + mss));
+
+       skb = segs;
+       th = tcp_hdr(skb);
+       seq = ntohl(th->seq);
+
+       newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
+                                              (__force u32)delta));
+
+       do {
+               th->fin = th->psh = 0;
+               th->check = newcheck;
+
+               if (skb->ip_summed != CHECKSUM_PARTIAL)
+                       th->check =
+                            csum_fold(csum_partial(skb_transport_header(skb),
+                                                   thlen, skb->csum));
+
+               seq += mss;
+               if (copy_destructor) {
+                       skb->destructor = gso_skb->destructor;
+                       skb->sk = gso_skb->sk;
+                       /* {tcp|sock}_wfree() uses exact truesize accounting :
+                        * sum(skb->truesize) MUST be exactly gso_skb->truesize.
+                        * So we account mss bytes of 'true size' for each segment.
+                        * The last segment will contain the remainder.
+                        */
+                       skb->truesize = mss;
+                       gso_skb->truesize -= mss;
+               }
+               skb = skb->next;
+               th = tcp_hdr(skb);
+
+               th->seq = htonl(seq);
+               th->cwr = 0;
+       } while (skb->next);
+
+       /* The following permits TCP Small Queues to work well with GSO :
+        * the callback to the TCP stack will run when the last frag is
+        * freed at TX completion, and not right now when gso_skb is
+        * freed by the GSO engine.
+        */
+       if (copy_destructor) {
+               swap(gso_skb->sk, skb->sk);
+               swap(gso_skb->destructor, skb->destructor);
+               swap(gso_skb->truesize, skb->truesize);
+       }
+
+       delta = htonl(oldlen + (skb_tail_pointer(skb) -
+                               skb_transport_header(skb)) +
+                     skb->data_len);
+       th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+                               (__force u32)delta));
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               th->check = csum_fold(csum_partial(skb_transport_header(skb),
+                                                  thlen, skb->csum));
+out:
+       return segs;
+}
+EXPORT_SYMBOL(tcp_tso_segment);
+
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       struct sk_buff **pp = NULL;
+       struct sk_buff *p;
+       struct tcphdr *th;
+       struct tcphdr *th2;
+       unsigned int len;
+       unsigned int thlen;
+       __be32 flags;
+       unsigned int mss = 1;
+       unsigned int hlen;
+       unsigned int off;
+       int flush = 1;
+       int i;
+
+       off = skb_gro_offset(skb);
+       hlen = off + sizeof(*th);
+       th = skb_gro_header_fast(skb, off);
+       if (skb_gro_header_hard(skb, hlen)) {
+               th = skb_gro_header_slow(skb, hlen, off);
+               if (unlikely(!th))
+                       goto out;
+       }
+
+       thlen = th->doff * 4;
+       if (thlen < sizeof(*th))
+               goto out;
+
+       hlen = off + thlen;
+       if (skb_gro_header_hard(skb, hlen)) {
+               th = skb_gro_header_slow(skb, hlen, off);
+               if (unlikely(!th))
+                       goto out;
+       }
+
+       skb_gro_pull(skb, thlen);
+
+       len = skb_gro_len(skb);
+       flags = tcp_flag_word(th);
+
+       for (; (p = *head); head = &p->next) {
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               th2 = tcp_hdr(p);
+
+               if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+
+               goto found;
+       }
+
+       goto out_check_final;
+
+found:
+       flush = NAPI_GRO_CB(p)->flush;
+       flush |= (__force int)(flags & TCP_FLAG_CWR);
+       flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
+                 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
+       flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
+       for (i = sizeof(*th); i < thlen; i += 4)
+               flush |= *(u32 *)((u8 *)th + i) ^
+                        *(u32 *)((u8 *)th2 + i);
+
+       mss = tcp_skb_mss(p);
+
+       flush |= (len - 1) >= mss;
+       flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
+
+       if (flush || skb_gro_receive(head, skb)) {
+               mss = 1;
+               goto out_check_final;
+       }
+
+       p = *head;
+       th2 = tcp_hdr(p);
+       tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+
+out_check_final:
+       flush = len < mss;
+       flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
+                                       TCP_FLAG_RST | TCP_FLAG_SYN |
+                                       TCP_FLAG_FIN));
+
+       if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
+               pp = head;
+
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
+EXPORT_SYMBOL(tcp_gro_receive);
+
+int tcp_gro_complete(struct sk_buff *skb)
+{
+       struct tcphdr *th = tcp_hdr(skb);
+
+       skb->csum_start = skb_transport_header(skb) - skb->head;
+       skb->csum_offset = offsetof(struct tcphdr, check);
+       skb->ip_summed = CHECKSUM_PARTIAL;
+
+       skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+       if (th->cwr)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+       return 0;
+}
+EXPORT_SYMBOL(tcp_gro_complete);
+
+static int tcp_v4_gso_send_check(struct sk_buff *skb)
+{
+       const struct iphdr *iph;
+       struct tcphdr *th;
+
+       if (!pskb_may_pull(skb, sizeof(*th)))
+               return -EINVAL;
+
+       iph = ip_hdr(skb);
+       th = tcp_hdr(skb);
+
+       th->check = 0;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
+       return 0;
+}
+
+static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+       const struct iphdr *iph = skb_gro_network_header(skb);
+       __wsum wsum;
+       __sum16 sum;
+
+       switch (skb->ip_summed) {
+       case CHECKSUM_COMPLETE:
+               if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
+                                 skb->csum)) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       break;
+               }
+flush:
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
+
+       case CHECKSUM_NONE:
+               wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+                                         skb_gro_len(skb), IPPROTO_TCP, 0);
+               sum = csum_fold(skb_checksum(skb,
+                                            skb_gro_offset(skb),
+                                            skb_gro_len(skb),
+                                            wsum));
+               if (sum)
+                       goto flush;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
+       }
+
+       return tcp_gro_receive(head, skb);
+}
+
+static int tcp4_gro_complete(struct sk_buff *skb)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct tcphdr *th = tcp_hdr(skb);
+
+       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+                                 iph->saddr, iph->daddr, 0);
+       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+       return tcp_gro_complete(skb);
+}
+
+static const struct net_offload tcpv4_offload = {
+       .callbacks = {
+               .gso_send_check =       tcp_v4_gso_send_check,
+               .gso_segment    =       tcp_tso_segment,
+               .gro_receive    =       tcp4_gro_receive,
+               .gro_complete   =       tcp4_gro_complete,
+       },
+};
+
+int __init tcpv4_offload_init(void)
+{
+       return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
+}
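The truesize comment inside tcp_tso_segment() above requires that the per-segment truesize values add back up to the original gso_skb->truesize: every segment but the last is charged mss bytes and the last keeps the remainder. A small arithmetic check of that invariant, with made-up sizes:

/* Arithmetic check of the truesize split described in tcp_tso_segment()
 * above: (nsegs - 1) segments of mss plus the remainder must equal the
 * original truesize. Sizes below are made up for illustration.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int gso_truesize = 65230;	/* assumed original truesize */
	unsigned int mss = 1448;		/* assumed per-segment charge */
	unsigned int nsegs = 5;			/* assumed segment count */
	unsigned int remaining = gso_truesize;
	unsigned int sum = 0;

	for (unsigned int i = 0; i + 1 < nsegs; i++) {
		sum += mss;		/* skb->truesize = mss          */
		remaining -= mss;	/* gso_skb->truesize -= mss     */
	}
	sum += remaining;		/* last segment keeps the rest  */

	assert(sum == gso_truesize);
	printf("sum of per-segment truesize = %u (matches)\n", sum);
	return 0;
}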
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 536d40929ba6f809d5c79968c48053202dd3748f..3d609490f118e753ff791b0ac1fb79cbcb6a7fa6 100644 (file)
@@ -160,6 +160,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;
+       const struct dst_entry *dst = __sk_dst_get(sk);
 
        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
@@ -170,8 +171,9 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        /* If it is a reply for ato after last received
         * packet, enter pingpong mode.
         */
-       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
-               icsk->icsk_ack.pingpong = 1;
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
+           (!dst || !dst_metric(dst, RTAX_QUICKACK)))
+               icsk->icsk_ack.pingpong = 1;
 }
 
 /* Account for an ACK we sent. */
@@ -181,6 +183,21 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
+
+u32 tcp_default_init_rwnd(u32 mss)
+{
+       /* The initial receive window should be twice TCP_INIT_CWND to
+        * enable proper sending of new unsent data during fast recovery
+        * (RFC 3517, Section 4, NextSeg() rule (2)). Further, place a
+        * limit on it when mss is larger than 1460.
+        */
+       u32 init_rwnd = TCP_INIT_CWND * 2;
+
+       if (mss > 1460)
+               init_rwnd = max((1460 * init_rwnd) / mss, 2U);
+       return init_rwnd;
+}
+
 /* Determine a window scaling and initial window to offer.
  * Based on the assumption that the given amount of space
  * will be offered. Store the results in the tp structure.
@@ -230,22 +247,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
                }
        }
 
-       /* Set initial window to a value enough for senders starting with
-        * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
-        * a limit on the initial window when mss is larger than 1460.
-        */
        if (mss > (1 << *rcv_wscale)) {
-               int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
-               if (mss > 1460)
-                       init_cwnd =
-                       max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
-               /* when initializing use the value from init_rcv_wnd
-                * rather than the default from above
-                */
-               if (init_rcv_wnd)
-                       *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
-               else
-                       *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
+               if (!init_rcv_wnd) /* Use default unless specified otherwise */
+                       init_rcv_wnd = tcp_default_init_rwnd(mss);
+               *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
        }
 
        /* Set the clamp no higher than max representable value */
@@ -874,11 +879,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 
-       if (tcp_packets_in_flight(tp) == 0) {
+       if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);
-               skb->ooo_okay = 1;
-       } else
-               skb->ooo_okay = 0;
+
+       /* If no packet is in the qdisc/device queue, then allow XPS to
+        * select another queue.
+        */
+       skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
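The new tcp_default_init_rwnd() above starts from 2 * TCP_INIT_CWND segments and scales that value down once the MSS exceeds 1460 bytes, never going below 2. The sketch below reproduces that arithmetic in userspace so the resulting window sizes are easy to eyeball; TCP_INIT_CWND is assumed to be 10, the kernel default of this period.

/* Userspace reproduction of the arithmetic in tcp_default_init_rwnd()
 * above. TCP_INIT_CWND is assumed to be 10.
 */
#include <stdio.h>

#define TCP_INIT_CWND 10U

static unsigned int default_init_rwnd(unsigned int mss)
{
	unsigned int init_rwnd = TCP_INIT_CWND * 2;	/* 20 segments */

	if (mss > 1460) {
		init_rwnd = (1460 * init_rwnd) / mss;	/* scale down */
		if (init_rwnd < 2)
			init_rwnd = 2;			/* floor of 2 */
	}
	return init_rwnd;
}

int main(void)
{
	const unsigned int mss[] = { 536, 1460, 4096, 9000, 65535 };

	for (unsigned int i = 0; i < sizeof(mss) / sizeof(mss[0]); i++)
		printf("mss %5u -> initial rwnd of %2u segments\n",
		       mss[i], default_init_rwnd(mss[i]));
	return 0;
}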
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0bf5d399a03c1c0eaedeedd4a6a9db4ee2af68e6..959502afd8d9fda7b0a58046a10081b56e7ea093 100644 (file)
 #include <trace/events/udp.h>
 #include <linux/static_key.h>
 #include <trace/events/skb.h>
+#include <net/ll_poll.h>
 #include "udp_impl.h"
 
 struct udp_table udp_table __read_mostly;
@@ -429,7 +430,7 @@ begin:
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
                                hash = inet_ehashfn(net, daddr, hnum,
-                                                   saddr, htons(sport));
+                                                   saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -510,7 +511,7 @@ begin:
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
                                hash = inet_ehashfn(net, daddr, hnum,
-                                                   saddr, htons(sport));
+                                                   saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -1709,7 +1710,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 
        if (sk != NULL) {
-               int ret = udp_queue_rcv_skb(sk, skb);
+               int ret;
+
+               sk_mark_ll(sk, skb);
+               ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
                /* a return value > 0 means to resubmit the input, but
@@ -1967,6 +1971,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
        unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
+       sock_rps_record_flow(sk);
+
        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
@@ -2284,29 +2290,8 @@ void __init udp_init(void)
        sysctl_udp_wmem_min = SK_MEM_QUANTUM;
 }
 
-int udp4_ufo_send_check(struct sk_buff *skb)
-{
-       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
-               return -EINVAL;
-
-       if (likely(!skb->encapsulation)) {
-               const struct iphdr *iph;
-               struct udphdr *uh;
-
-               iph = ip_hdr(skb);
-               uh = udp_hdr(skb);
-
-               uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
-                               IPPROTO_UDP, 0);
-               skb->csum_start = skb_transport_header(skb) - skb->head;
-               skb->csum_offset = offsetof(struct udphdr, check);
-               skb->ip_summed = CHECKSUM_PARTIAL;
-       }
-       return 0;
-}
-
-static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-               netdev_features_t features)
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        int mac_len = skb->mac_len;
@@ -2365,53 +2350,3 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 out:
        return segs;
 }
-
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
-       netdev_features_t features)
-{
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
-       unsigned int mss;
-       mss = skb_shinfo(skb)->gso_size;
-       if (unlikely(skb->len <= mss))
-               goto out;
-
-       if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
-               /* Packet is from an untrusted source, reset gso_segs. */
-               int type = skb_shinfo(skb)->gso_type;
-
-               if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
-                                     SKB_GSO_UDP_TUNNEL |
-                                     SKB_GSO_GRE) ||
-                            !(type & (SKB_GSO_UDP))))
-                       goto out;
-
-               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
-
-               segs = NULL;
-               goto out;
-       }
-
-       /* Fragment the skb. IP headers of the fragments are updated in
-        * inet_gso_segment()
-        */
-       if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
-               segs = skb_udp_tunnel_segment(skb, features);
-       else {
-               int offset;
-               __wsum csum;
-
-               /* Do software UFO. Complete and fill in the UDP checksum as
-                * HW cannot do checksum of UDP packets sent as multiple
-                * IP fragments.
-                */
-               offset = skb_checksum_start_offset(skb);
-               csum = skb_checksum(skb, offset, skb->len - offset, 0);
-               offset += skb->csum_offset;
-               *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-               skb->ip_summed = CHECKSUM_NONE;
-
-               segs = skb_segment(skb, features);
-       }
-out:
-       return segs;
-}
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
new file mode 100644 (file)
index 0000000..f35ecca
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ *     IPV4 GSO/GRO offload support
+ *     Linux INET implementation
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ *     UDPv4 GSO support
+ */
+
+#include <linux/skbuff.h>
+#include <net/udp.h>
+#include <net/protocol.h>
+
+static int udp4_ufo_send_check(struct sk_buff *skb)
+{
+       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+               return -EINVAL;
+
+       if (likely(!skb->encapsulation)) {
+               const struct iphdr *iph;
+               struct udphdr *uh;
+
+               iph = ip_hdr(skb);
+               uh = udp_hdr(skb);
+
+               uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
+                               IPPROTO_UDP, 0);
+               skb->csum_start = skb_transport_header(skb) - skb->head;
+               skb->csum_offset = offsetof(struct udphdr, check);
+               skb->ip_summed = CHECKSUM_PARTIAL;
+       }
+
+       return 0;
+}
+
+static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+                                        netdev_features_t features)
+{
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       unsigned int mss;
+
+       mss = skb_shinfo(skb)->gso_size;
+       if (unlikely(skb->len <= mss))
+               goto out;
+
+       if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+               /* Packet is from an untrusted source, reset gso_segs. */
+               int type = skb_shinfo(skb)->gso_type;
+
+               if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
+                                     SKB_GSO_UDP_TUNNEL |
+                                     SKB_GSO_GRE | SKB_GSO_MPLS) ||
+                            !(type & (SKB_GSO_UDP))))
+                       goto out;
+
+               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
+
+               segs = NULL;
+               goto out;
+       }
+
+       /* Fragment the skb. IP headers of the fragments are updated in
+        * inet_gso_segment()
+        */
+       if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
+               segs = skb_udp_tunnel_segment(skb, features);
+       else {
+               int offset;
+               __wsum csum;
+
+               /* Do software UFO. Complete and fill in the UDP checksum as
+                * the HW cannot checksum UDP packets that are sent as
+                * multiple IP fragments.
+                */
+               offset = skb_checksum_start_offset(skb);
+               csum = skb_checksum(skb, offset, skb->len - offset, 0);
+               offset += skb->csum_offset;
+               *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+               skb->ip_summed = CHECKSUM_NONE;
+
+               segs = skb_segment(skb, features);
+       }
+out:
+       return segs;
+}
+
+static const struct net_offload udpv4_offload = {
+       .callbacks = {
+               .gso_send_check = udp4_ufo_send_check,
+               .gso_segment = udp4_ufo_fragment,
+       },
+};
+
+int __init udpv4_offload_init(void)
+{
+       return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
+}
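The software UFO branch above computes a full checksum over the datagram because the hardware cannot checksum UDP packets that end up as multiple IP fragments, and then folds the 32-bit accumulator into the 16-bit header field with csum_fold(). A minimal sketch of that final fold step, assuming the accumulator has already been computed:

/* Sketch of folding a 32-bit ones'-complement accumulator into the 16-bit
 * Internet checksum, as csum_fold() does in the software UFO path above.
 * The sample accumulator value is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold_checksum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* second pass for the last carry */
	return (uint16_t)~sum;			/* final ones' complement */
}

int main(void)
{
	uint32_t sum = 0x0002fffd;		/* assumed accumulator */

	printf("folded checksum: 0x%04x\n", fold_checksum(sum));
	return 0;
}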
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 05a5df2febc9d34ed22bdc8749b4a064c4550c14..06347dbd32c1ebe0d013f858de4a56abb66f3305 100644 (file)
@@ -63,7 +63,7 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
 static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
        .handler        =       xfrm_tunnel_rcv,
        .err_handler    =       xfrm_tunnel_err,
-       .priority       =       2,
+       .priority       =       3,
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 9af088d2cdaab1f4489f092f9a473d70b5c49667..470a9c008e9b9e921045bb5b11b77569e53ac5c9 100644 (file)
@@ -7,7 +7,7 @@ obj-$(CONFIG_IPV6) += ipv6.o
 ipv6-objs :=   af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
                addrlabel.o \
                route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
-               raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
+               raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
                exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
 ipv6-offload :=        ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d1ab6ab29a55c11e0570bb9854c524d9f21369fc..4e4cc1fc26d148e51f5a3fa8e8f950a16eaa7de6 100644 (file)
@@ -253,37 +253,32 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
        return !qdisc_tx_is_noop(dev);
 }
 
-static void addrconf_del_timer(struct inet6_ifaddr *ifp)
+static void addrconf_del_rs_timer(struct inet6_dev *idev)
 {
-       if (del_timer(&ifp->timer))
+       if (del_timer(&idev->rs_timer))
+               __in6_dev_put(idev);
+}
+
+static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp)
+{
+       if (del_timer(&ifp->dad_timer))
                __in6_ifa_put(ifp);
 }
 
-enum addrconf_timer_t {
-       AC_NONE,
-       AC_DAD,
-       AC_RS,
-};
+static void addrconf_mod_rs_timer(struct inet6_dev *idev,
+                                 unsigned long when)
+{
+       if (!timer_pending(&idev->rs_timer))
+               in6_dev_hold(idev);
+       mod_timer(&idev->rs_timer, jiffies + when);
+}
 
-static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
-                              enum addrconf_timer_t what,
-                              unsigned long when)
+static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
+                                  unsigned long when)
 {
-       if (!del_timer(&ifp->timer))
+       if (!timer_pending(&ifp->dad_timer))
                in6_ifa_hold(ifp);
-
-       switch (what) {
-       case AC_DAD:
-               ifp->timer.function = addrconf_dad_timer;
-               break;
-       case AC_RS:
-               ifp->timer.function = addrconf_rs_timer;
-               break;
-       default:
-               break;
-       }
-       ifp->timer.expires = jiffies + when;
-       add_timer(&ifp->timer);
+       mod_timer(&ifp->dad_timer, jiffies + when);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -326,6 +321,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
 
        WARN_ON(!list_empty(&idev->addr_list));
        WARN_ON(idev->mc_list != NULL);
+       WARN_ON(timer_pending(&idev->rs_timer));
 
 #ifdef NET_REFCNT_DEBUG
        pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
@@ -357,7 +353,8 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        rwlock_init(&ndev->lock);
        ndev->dev = dev;
        INIT_LIST_HEAD(&ndev->addr_list);
-
+       setup_timer(&ndev->rs_timer, addrconf_rs_timer,
+                   (unsigned long)ndev);
        memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
        ndev->cnf.mtu6 = dev->mtu;
        ndev->cnf.sysctl = NULL;
@@ -776,7 +773,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
 
        in6_dev_put(ifp->idev);
 
-       if (del_timer(&ifp->timer))
+       if (del_timer(&ifp->dad_timer))
                pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
 
        if (ifp->state != INET6_IFADDR_STATE_DEAD) {
@@ -869,9 +866,9 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
 
        spin_lock_init(&ifa->lock);
        spin_lock_init(&ifa->state_lock);
-       init_timer(&ifa->timer);
+       setup_timer(&ifa->dad_timer, addrconf_dad_timer,
+                   (unsigned long)ifa);
        INIT_HLIST_NODE(&ifa->addr_lst);
-       ifa->timer.data = (unsigned long) ifa;
        ifa->scope = scope;
        ifa->prefix_len = pfxlen;
        ifa->flags = flags | IFA_F_TENTATIVE;
@@ -994,7 +991,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
        }
        write_unlock_bh(&idev->lock);
 
-       addrconf_del_timer(ifp);
+       addrconf_del_dad_timer(ifp);
 
        ipv6_ifa_notify(RTM_DELADDR, ifp);
 
@@ -1126,8 +1123,7 @@ retry:
 
        ift = !max_addresses ||
              ipv6_count_addresses(idev) < max_addresses ?
-               ipv6_add_addr(idev, &addr, tmp_plen,
-                             ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+               ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
                              addr_flags) : NULL;
        if (IS_ERR_OR_NULL(ift)) {
                in6_ifa_put(ifp);
@@ -1448,6 +1444,23 @@ try_nextdev:
 }
 EXPORT_SYMBOL(ipv6_dev_get_saddr);
 
+static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+                            unsigned char banned_flags)
+{
+       struct inet6_ifaddr *ifp;
+       int err = -EADDRNOTAVAIL;
+
+       list_for_each_entry(ifp, &idev->addr_list, if_list) {
+               if (ifp->scope == IFA_LINK &&
+                   !(ifp->flags & banned_flags)) {
+                       *addr = ifp->addr;
+                       err = 0;
+                       break;
+               }
+       }
+       return err;
+}
+
 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
                    unsigned char banned_flags)
 {
@@ -1457,17 +1470,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
        rcu_read_lock();
        idev = __in6_dev_get(dev);
        if (idev) {
-               struct inet6_ifaddr *ifp;
-
                read_lock_bh(&idev->lock);
-               list_for_each_entry(ifp, &idev->addr_list, if_list) {
-                       if (ifp->scope == IFA_LINK &&
-                           !(ifp->flags & banned_flags)) {
-                               *addr = ifp->addr;
-                               err = 0;
-                               break;
-                       }
-               }
+               err = __ipv6_get_lladdr(idev, addr, banned_flags);
                read_unlock_bh(&idev->lock);
        }
        rcu_read_unlock();
@@ -1487,7 +1491,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
 }
 
 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
-                 struct net_device *dev, int strict)
+                 const struct net_device *dev, int strict)
 {
        struct inet6_ifaddr *ifp;
        unsigned int hash = inet6_addr_hash(addr);
@@ -1581,7 +1585,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
 {
        if (ifp->flags&IFA_F_PERMANENT) {
                spin_lock_bh(&ifp->lock);
-               addrconf_del_timer(ifp);
+               addrconf_del_dad_timer(ifp);
                ifp->flags |= IFA_F_TENTATIVE;
                if (dad_failed)
                        ifp->flags |= IFA_F_DADFAILED;
@@ -2402,6 +2406,7 @@ err_exit:
  *     Manual configuration of address on an interface
  */
 static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
+                         const struct in6_addr *peer_pfx,
                          unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
                          __u32 valid_lft)
 {
@@ -2457,6 +2462,8 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
                ifp->valid_lft = valid_lft;
                ifp->prefered_lft = prefered_lft;
                ifp->tstamp = jiffies;
+               if (peer_pfx)
+                       ifp->peer_addr = *peer_pfx;
                spin_unlock_bh(&ifp->lock);
 
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
@@ -2500,12 +2507,6 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p
                        read_unlock_bh(&idev->lock);
 
                        ipv6_del_addr(ifp);
-
-                       /* If the last address is deleted administratively,
-                          disable IPv6 on this interface.
-                        */
-                       if (list_empty(&idev->addr_list))
-                               addrconf_ifdown(idev->dev, 1);
                        return 0;
                }
        }
@@ -2526,7 +2527,7 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
                return -EFAULT;
 
        rtnl_lock();
-       err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
+       err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
                             ireq.ifr6_prefixlen, IFA_F_PERMANENT,
                             INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        rtnl_unlock();
@@ -2658,8 +2659,10 @@ static void init_loopback(struct net_device *dev)
                        sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
 
                        /* Failure cases are ignored */
-                       if (!IS_ERR(sp_rt))
+                       if (!IS_ERR(sp_rt)) {
+                               sp_ifa->rt = sp_rt;
                                ip6_ins_rt(sp_rt);
+                       }
                }
                read_unlock_bh(&idev->lock);
        }
@@ -2756,8 +2759,6 @@ static void addrconf_gre_config(struct net_device *dev)
        struct inet6_dev *idev;
        struct in6_addr addr;
 
-       pr_info("%s(%s)\n", __func__, dev->name);
-
        ASSERT_RTNL();
 
        if ((idev = ipv6_find_idev(dev)) == NULL) {
@@ -2824,9 +2825,9 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
 }
 
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
-                          void *data)
+                          void *ptr)
 {
-       struct net_device *dev = (struct net_device *) data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct inet6_dev *idev = __in6_dev_get(dev);
        int run_pending = 0;
        int err;
@@ -3034,7 +3035,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
                hlist_for_each_entry_rcu(ifa, h, addr_lst) {
                        if (ifa->idev == idev) {
                                hlist_del_init_rcu(&ifa->addr_lst);
-                               addrconf_del_timer(ifa);
+                               addrconf_del_dad_timer(ifa);
                                goto restart;
                        }
                }
@@ -3043,6 +3044,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        write_lock_bh(&idev->lock);
 
+       addrconf_del_rs_timer(idev);
+
        /* Step 2: clear flags for stateless addrconf */
        if (!how)
                idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
@@ -3072,7 +3075,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
        while (!list_empty(&idev->addr_list)) {
                ifa = list_first_entry(&idev->addr_list,
                                       struct inet6_ifaddr, if_list);
-               addrconf_del_timer(ifa);
+               addrconf_del_dad_timer(ifa);
 
                list_del(&ifa->if_list);
 
@@ -3114,10 +3117,10 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
 static void addrconf_rs_timer(unsigned long data)
 {
-       struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
-       struct inet6_dev *idev = ifp->idev;
+       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct in6_addr lladdr;
 
-       read_lock(&idev->lock);
+       write_lock(&idev->lock);
        if (idev->dead || !(idev->if_flags & IF_READY))
                goto out;
 
@@ -3128,18 +3131,19 @@ static void addrconf_rs_timer(unsigned long data)
        if (idev->if_flags & IF_RA_RCVD)
                goto out;
 
-       spin_lock(&ifp->lock);
-       if (ifp->probes++ < idev->cnf.rtr_solicits) {
-               /* The wait after the last probe can be shorter */
-               addrconf_mod_timer(ifp, AC_RS,
-                                  (ifp->probes == idev->cnf.rtr_solicits) ?
-                                  idev->cnf.rtr_solicit_delay :
-                                  idev->cnf.rtr_solicit_interval);
-               spin_unlock(&ifp->lock);
+       if (idev->rs_probes++ < idev->cnf.rtr_solicits) {
+               if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE))
+                       ndisc_send_rs(idev->dev, &lladdr,
+                                     &in6addr_linklocal_allrouters);
+               else
+                       goto out;
 
-               ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
+               /* The wait after the last probe can be shorter */
+               addrconf_mod_rs_timer(idev, (idev->rs_probes ==
+                                            idev->cnf.rtr_solicits) ?
+                                     idev->cnf.rtr_solicit_delay :
+                                     idev->cnf.rtr_solicit_interval);
        } else {
-               spin_unlock(&ifp->lock);
                /*
                 * Note: we do not support deprecated "all on-link"
                 * assumption any longer.
@@ -3148,8 +3152,8 @@ static void addrconf_rs_timer(unsigned long data)
        }
 
 out:
-       read_unlock(&idev->lock);
-       in6_ifa_put(ifp);
+       write_unlock(&idev->lock);
+       in6_dev_put(idev);
 }
 
 /*
@@ -3165,8 +3169,8 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
        else
                rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);
 
-       ifp->probes = idev->cnf.dad_transmits;
-       addrconf_mod_timer(ifp, AC_DAD, rand_num);
+       ifp->dad_probes = idev->cnf.dad_transmits;
+       addrconf_mod_dad_timer(ifp, rand_num);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
@@ -3227,40 +3231,40 @@ static void addrconf_dad_timer(unsigned long data)
        struct inet6_dev *idev = ifp->idev;
        struct in6_addr mcaddr;
 
-       if (!ifp->probes && addrconf_dad_end(ifp))
+       if (!ifp->dad_probes && addrconf_dad_end(ifp))
                goto out;
 
-       read_lock(&idev->lock);
+       write_lock(&idev->lock);
        if (idev->dead || !(idev->if_flags & IF_READY)) {
-               read_unlock(&idev->lock);
+               write_unlock(&idev->lock);
                goto out;
        }
 
        spin_lock(&ifp->lock);
        if (ifp->state == INET6_IFADDR_STATE_DEAD) {
                spin_unlock(&ifp->lock);
-               read_unlock(&idev->lock);
+               write_unlock(&idev->lock);
                goto out;
        }
 
-       if (ifp->probes == 0) {
+       if (ifp->dad_probes == 0) {
                /*
                 * DAD was successful
                 */
 
                ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
                spin_unlock(&ifp->lock);
-               read_unlock(&idev->lock);
+               write_unlock(&idev->lock);
 
                addrconf_dad_completed(ifp);
 
                goto out;
        }
 
-       ifp->probes--;
-       addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
+       ifp->dad_probes--;
+       addrconf_mod_dad_timer(ifp, ifp->idev->nd_parms->retrans_time);
        spin_unlock(&ifp->lock);
-       read_unlock(&idev->lock);
+       write_unlock(&idev->lock);
 
        /* send a neighbour solicitation for our addr */
        addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
@@ -3272,6 +3276,9 @@ out:
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 {
        struct net_device *dev = ifp->idev->dev;
+       struct in6_addr lladdr;
+
+       addrconf_del_dad_timer(ifp);
 
        /*
         *      Configure the address for reception. Now it is valid.
@@ -3292,13 +3299,20 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
                 *      [...] as part of DAD [...] there is no need
                 *      to delay again before sending the first RS
                 */
-               ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
+               if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+                       ndisc_send_rs(dev, &lladdr,
+                                     &in6addr_linklocal_allrouters);
+               else
+                       return;
 
-               spin_lock_bh(&ifp->lock);
-               ifp->probes = 1;
+               write_lock_bh(&ifp->idev->lock);
+               spin_lock(&ifp->lock);
+               ifp->idev->rs_probes = 1;
                ifp->idev->if_flags |= IF_RS_SENT;
-               addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval);
-               spin_unlock_bh(&ifp->lock);
+               addrconf_mod_rs_timer(ifp->idev,
+                                     ifp->idev->cnf.rtr_solicit_interval);
+               spin_unlock(&ifp->lock);
+               write_unlock_bh(&ifp->idev->lock);
        }
 }
 
@@ -3610,18 +3624,20 @@ restart:
        rcu_read_unlock_bh();
 }
 
-static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
+static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
+                                    struct in6_addr **peer_pfx)
 {
        struct in6_addr *pfx = NULL;
 
+       *peer_pfx = NULL;
+
        if (addr)
                pfx = nla_data(addr);
 
        if (local) {
                if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
-                       pfx = NULL;
-               else
-                       pfx = nla_data(local);
+                       *peer_pfx = pfx;
+               pfx = nla_data(local);
        }
 
        return pfx;
@@ -3639,7 +3655,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net *net = sock_net(skb->sk);
        struct ifaddrmsg *ifm;
        struct nlattr *tb[IFA_MAX+1];
-       struct in6_addr *pfx;
+       struct in6_addr *pfx, *peer_pfx;
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3647,7 +3663,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
                return err;
 
        ifm = nlmsg_data(nlh);
-       pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]);
+       pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
        if (pfx == NULL)
                return -EINVAL;
 
@@ -3705,7 +3721,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net *net = sock_net(skb->sk);
        struct ifaddrmsg *ifm;
        struct nlattr *tb[IFA_MAX+1];
-       struct in6_addr *pfx;
+       struct in6_addr *pfx, *peer_pfx;
        struct inet6_ifaddr *ifa;
        struct net_device *dev;
        u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
@@ -3717,7 +3733,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
                return err;
 
        ifm = nlmsg_data(nlh);
-       pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]);
+       pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
        if (pfx == NULL)
                return -EINVAL;
 
@@ -3745,7 +3761,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
                 * It would be best to check for !NLM_F_CREATE here but
                 * userspace already relies on not having to provide this.
                 */
-               return inet6_addr_add(net, ifm->ifa_index, pfx,
+               return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
                                      ifm->ifa_prefixlen, ifa_flags,
                                      preferred_lft, valid_lft);
        }
@@ -3802,6 +3818,7 @@ static inline int rt_scope(int ifa_scope)
 static inline int inet6_ifaddr_msgsize(void)
 {
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+              + nla_total_size(16) /* IFA_LOCAL */
               + nla_total_size(16) /* IFA_ADDRESS */
               + nla_total_size(sizeof(struct ifa_cacheinfo));
 }
@@ -3840,13 +3857,22 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
                valid = INFINITY_LIFE_TIME;
        }
 
-       if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 ||
-           put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) {
-               nlmsg_cancel(skb, nlh);
-               return -EMSGSIZE;
-       }
+       if (!ipv6_addr_any(&ifa->peer_addr)) {
+               if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 ||
+                   nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0)
+                       goto error;
+       } else
+               if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0)
+                       goto error;
+
+       if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
+               goto error;
 
        return nlmsg_end(skb, nlh);
+
+error:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
 }
 
 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
@@ -4046,7 +4072,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        struct net *net = sock_net(in_skb->sk);
        struct ifaddrmsg *ifm;
        struct nlattr *tb[IFA_MAX+1];
-       struct in6_addr *addr = NULL;
+       struct in6_addr *addr = NULL, *peer;
        struct net_device *dev = NULL;
        struct inet6_ifaddr *ifa;
        struct sk_buff *skb;
@@ -4056,7 +4082,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        if (err < 0)
                goto errout;
 
-       addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]);
+       addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
        if (addr == NULL) {
                err = -EINVAL;
                goto errout;
@@ -4335,8 +4361,11 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        write_lock_bh(&idev->lock);
 
-       if (update_rs)
+       if (update_rs) {
                idev->if_flags |= IF_RS_SENT;
+               idev->rs_probes = 1;
+               addrconf_mod_rs_timer(idev, idev->cnf.rtr_solicit_interval);
+       }
 
        /* Well, that's kinda nasty ... */
        list_for_each_entry(ifp, &idev->addr_list, if_list) {
@@ -4349,6 +4378,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
        }
 
        write_unlock_bh(&idev->lock);
+       addrconf_verify(0);
        return 0;
 }
 
@@ -4564,11 +4594,26 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                        ip6_ins_rt(ifp->rt);
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
+               if (!ipv6_addr_any(&ifp->peer_addr))
+                       addrconf_prefix_route(&ifp->peer_addr, 128,
+                                             ifp->idev->dev, 0, 0);
                break;
        case RTM_DELADDR:
                if (ifp->idev->cnf.forwarding)
                        addrconf_leave_anycast(ifp);
                addrconf_leave_solict(ifp->idev, &ifp->addr);
+               if (!ipv6_addr_any(&ifp->peer_addr)) {
+                       struct rt6_info *rt;
+                       struct net_device *dev = ifp->idev->dev;
+
+                       rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
+                                       dev->ifindex, 1);
+                       if (rt) {
+                               dst_hold(&rt->dst);
+                               if (ip6_del_rt(rt))
+                                       dst_free(&rt->dst);
+                       }
+               }
                dst_hold(&ifp->rt->dst);
 
                if (ip6_del_rt(ifp->rt))
@@ -4589,13 +4634,13 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 #ifdef CONFIG_SYSCTL
 
 static
-int addrconf_sysctl_forward(ctl_table *ctl, int write,
+int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
-       ctl_table lctl;
+       struct ctl_table lctl;
        int ret;
 
        /*
@@ -4616,13 +4661,16 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
 
 static void dev_disable_change(struct inet6_dev *idev)
 {
+       struct netdev_notifier_info info;
+
        if (!idev || !idev->dev)
                return;
 
+       netdev_notifier_info_init(&info, idev->dev);
        if (idev->cnf.disable_ipv6)
-               addrconf_notify(NULL, NETDEV_DOWN, idev->dev);
+               addrconf_notify(NULL, NETDEV_DOWN, &info);
        else
-               addrconf_notify(NULL, NETDEV_UP, idev->dev);
+               addrconf_notify(NULL, NETDEV_UP, &info);
 }
 
 static void addrconf_disable_change(struct net *net, __s32 newf)
@@ -4671,13 +4719,13 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 }
 
 static
-int addrconf_sysctl_disable(ctl_table *ctl, int write,
+int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
-       ctl_table lctl;
+       struct ctl_table lctl;
        int ret;
 
        /*
@@ -4699,7 +4747,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
 static struct addrconf_sysctl_table
 {
        struct ctl_table_header *sysctl_header;
-       ctl_table addrconf_vars[DEVCONF_MAX+1];
+       struct ctl_table addrconf_vars[DEVCONF_MAX+1];
 } addrconf_sysctl __read_mostly = {
        .sysctl_header = NULL,
        .addrconf_vars = {
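The inet6_fill_ifaddr() change above brings the IPv6 address dump in line with IPv4: when a peer address is configured, IFA_LOCAL carries the interface's own address and IFA_ADDRESS carries the peer; without a peer, only IFA_ADDRESS is emitted and it holds the local address. A minimal userspace sketch of a consumer following this convention is shown below; the helper name and the pre-parsed tb[] array are illustrative and not part of any existing library.

#include <string.h>
#include <netinet/in.h>
#include <linux/if_addr.h>
#include <linux/rtnetlink.h>

/* Illustrative only: pick local/peer out of a parsed RTM_NEWADDR message,
 * following the IFA_LOCAL/IFA_ADDRESS convention used by inet6_fill_ifaddr().
 * tb[] is assumed to have been filled from the message's attributes by the
 * caller (NULL where an attribute is absent).
 */
static int ifa_get_local_peer(struct rtattr *tb[],
			      struct in6_addr *local, struct in6_addr *peer)
{
	if (tb[IFA_LOCAL] && tb[IFA_ADDRESS]) {
		/* Point-to-point style entry: IFA_ADDRESS is the peer. */
		memcpy(local, RTA_DATA(tb[IFA_LOCAL]), sizeof(*local));
		memcpy(peer, RTA_DATA(tb[IFA_ADDRESS]), sizeof(*peer));
		return 1;	/* peer present */
	}
	if (tb[IFA_ADDRESS]) {
		/* No peer configured: IFA_ADDRESS is the local address. */
		memcpy(local, RTA_DATA(tb[IFA_ADDRESS]), sizeof(*local));
	}
	return 0;
}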
index 72104562c86481311857359882669527359a7f6c..d2f87427244b2ccd00f754048e46d4882f77363d 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/export.h>
 #include <net/ipv6.h>
+#include <net/addrconf.h>
 
 #define IPV6_ADDR_SCOPE_TYPE(scope)    ((scope) << 16)
 
index ab5c7ad482cded367c1180a3a3761615bc2a4d54..a5ac969aeefe5337d7fa4c5601912fb96b1a5496 100644 (file)
@@ -49,6 +49,7 @@
 #include <net/udp.h>
 #include <net/udplite.h>
 #include <net/tcp.h>
+#include <net/ping.h>
 #include <net/protocol.h>
 #include <net/inet_common.h>
 #include <net/route.h>
@@ -840,6 +841,9 @@ static int __init inet6_init(void)
        if (err)
                goto out_unregister_udplite_proto;
 
+       err = proto_register(&pingv6_prot, 1);
+       if (err)
+               goto out_unregister_ping_proto;
 
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
@@ -930,6 +934,10 @@ static int __init inet6_init(void)
        if (err)
                goto ipv6_packet_fail;
 
+       err = pingv6_init();
+       if (err)
+               goto pingv6_fail;
+
 #ifdef CONFIG_SYSCTL
        err = ipv6_sysctl_register();
        if (err)
@@ -942,6 +950,8 @@ out:
 sysctl_fail:
        ipv6_packet_cleanup();
 #endif
+pingv6_fail:
+       pingv6_exit();
 ipv6_packet_fail:
        tcpv6_exit();
 tcpv6_fail:
@@ -985,6 +995,8 @@ register_pernet_fail:
        rtnl_unregister_all(PF_INET6);
 out_sock_register_fail:
        rawv6_exit();
+out_unregister_ping_proto:
+       proto_unregister(&pingv6_prot);
 out_unregister_raw_proto:
        proto_unregister(&rawv6_prot);
 out_unregister_udplite_proto:
index 4b56cbbc789062d89c5a708386a03e5a8ded8bc6..197e6f4a2b7499c67440b66e04d646d631caefa5 100644 (file)
@@ -879,3 +879,30 @@ exit_f:
        return err;
 }
 EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+
+void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+                            __u16 srcp, __u16 destp, int bucket)
+{
+       struct ipv6_pinfo *np = inet6_sk(sp);
+       const struct in6_addr *dest, *src;
+
+       dest  = &np->daddr;
+       src   = &np->rcv_saddr;
+       seq_printf(seq,
+                  "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
+                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+                  bucket,
+                  src->s6_addr32[0], src->s6_addr32[1],
+                  src->s6_addr32[2], src->s6_addr32[3], srcp,
+                  dest->s6_addr32[0], dest->s6_addr32[1],
+                  dest->s6_addr32[2], dest->s6_addr32[3], destp,
+                  sp->sk_state,
+                  sk_wmem_alloc_get(sp),
+                  sk_rmem_alloc_get(sp),
+                  0, 0L, 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
+                  sock_i_ino(sp),
+                  atomic_read(&sp->sk_refcnt), sp,
+                  atomic_read(&sp->sk_drops));
+}
index c5e83fae4df423ccbe02bed8bf31ffd415014ad2..140748debc4ade194e5e179636e94264da7e65a1 100644 (file)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(ipv6_skip_exthdr);
 int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
 {
        const unsigned char *nh = skb_network_header(skb);
-       int packet_len = skb->tail - skb->network_header;
+       int packet_len = skb_tail_pointer(skb) - skb_network_header(skb);
        struct ipv6_opt_hdr *hdr;
        int len;
 
index b4ff0a42b8c70248faf1b7298c1bec2cc79368ee..7cfc8d284870f3e6c602988b640824c44d08555f 100644 (file)
@@ -57,6 +57,7 @@
 
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
+#include <net/ping.h>
 #include <net/protocol.h>
 #include <net/raw.h>
 #include <net/rawv6.h>
@@ -84,12 +85,18 @@ static inline struct sock *icmpv6_sk(struct net *net)
 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                       u8 type, u8 code, int offset, __be32 info)
 {
+       /* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
+       struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
        struct net *net = dev_net(skb->dev);
 
        if (type == ICMPV6_PKT_TOOBIG)
                ip6_update_pmtu(skb, net, info, 0, 0);
        else if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
+
+       if (!(type & ICMPV6_INFOMSG_MASK))
+               if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
+                       ping_err(skb, offset, info);
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
@@ -224,7 +231,8 @@ static bool opt_unrec(struct sk_buff *skb, __u32 offset)
        return (*op & 0xC0) == 0x80;
 }
 
-static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len)
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+                              struct icmp6hdr *thdr, int len)
 {
        struct sk_buff *skb;
        struct icmp6hdr *icmp6h;
@@ -307,8 +315,8 @@ static void mip6_addr_swap(struct sk_buff *skb)
 static inline void mip6_addr_swap(struct sk_buff *skb) {}
 #endif
 
-static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
-                                            struct sock *sk, struct flowi6 *fl6)
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+                                     struct sock *sk, struct flowi6 *fl6)
 {
        struct dst_entry *dst, *dst2;
        struct flowi6 fl2;
@@ -391,7 +399,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        int err = 0;
 
        if ((u8 *)hdr < skb->head ||
-           (skb->network_header + sizeof(*hdr)) > skb->tail)
+           (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
                return;
 
        /*
@@ -697,7 +705,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
                skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
                                             IPPROTO_ICMPV6, 0));
                if (__skb_checksum_complete(skb)) {
-                       LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
+                       LIMIT_NETDEBUG(KERN_DEBUG
+                                      "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
                                       saddr, daddr);
                        goto csum_error;
                }
@@ -718,7 +727,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
                break;
 
        case ICMPV6_ECHO_REPLY:
-               /* we couldn't care less */
+               ping_rcv(skb);
                break;
 
        case ICMPV6_PKT_TOOBIG:
@@ -967,7 +976,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
 EXPORT_SYMBOL(icmpv6_err_convert);
 
 #ifdef CONFIG_SYSCTL
-ctl_table ipv6_icmp_table_template[] = {
+struct ctl_table ipv6_icmp_table_template[] = {
        {
                .procname       = "ratelimit",
                .data           = &init_net.ipv6.sysctl.icmpv6_time,
index d3ddd840035450570d634fb22002496a447d5d34..ecd60733e5e24afdb28a52c95686fec28e2e4d73 100644 (file)
@@ -1081,6 +1081,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                }
                if (t == NULL)
                        t = netdev_priv(dev);
+               memset(&p, 0, sizeof(p));
                ip6gre_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
@@ -1128,6 +1129,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                if (t) {
                        err = 0;
 
+                       memset(&p, 0, sizeof(p));
                        ip6gre_tnl_parm_to_user(&p, &t->parms);
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
index 71b766ee821d64fd10e99482b962ca6cea07cdad..a263b990ee11d7bf504512c4c722e927d1e8ed66 100644 (file)
@@ -98,6 +98,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
                       SKB_GSO_UDP_TUNNEL |
+                      SKB_GSO_MPLS |
                       SKB_GSO_TCPV6 |
                       0)))
                goto out;
index d2eedf192330caf5a963c27ddaceffae5e4886b8..dae1949019d7b8dc77d14c39478571e86ad89f34 100644 (file)
@@ -1147,7 +1147,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                        if (WARN_ON(np->cork.opt))
                                return -EINVAL;
 
-                       np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
+                       np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
                        if (unlikely(np->cork.opt == NULL))
                                return -ENOBUFS;
 
index 241fb8ad9fcf28e2982f32e23ff9b7403ceef24a..583e8d435f9a2c47437d3897b7167ac4a4025a40 100644 (file)
@@ -1319,7 +1319,7 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
 static int ip6mr_device_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct mr6_table *mrt;
        struct mif_device *v;
index bfa6cc36ef2ab33e6e2894e03bb07c50f56a5804..72c8bfe06bb4afa547db787756d974998f93d86f 100644 (file)
@@ -1409,8 +1409,9 @@ static void mld_sendpack(struct sk_buff *skb)
        idev = __in6_dev_get(skb->dev);
        IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
-       payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
-       mldlen = skb->tail - skb->transport_header;
+       payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
+               sizeof(*pip6);
+       mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
        pip6->payload_len = htons(payload_len);
 
        pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
index 0f9bdc5ee9f38c70f1c4c265e8ebe43467d6b59a..9ac01dc9402e9337d4b27952127d229acbb6e062 100644 (file)
@@ -268,7 +268,8 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
        struct ipv6_opt_hdr *exthdr =
                                   (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
        const unsigned char *nh = skb_network_header(skb);
-       unsigned int packet_len = skb->tail - skb->network_header;
+       unsigned int packet_len = skb_tail_pointer(skb) -
+               skb_network_header(skb);
        int found_rhdr = 0;
 
        *nexthdr = &ipv6_hdr(skb)->nexthdr;
@@ -404,7 +405,8 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
        struct ipv6_opt_hdr *exthdr =
                                   (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
        const unsigned char *nh = skb_network_header(skb);
-       unsigned int packet_len = skb->tail - skb->network_header;
+       unsigned int packet_len = skb_tail_pointer(skb) -
+               skb_network_header(skb);
        int found_rhdr = 0;
 
        *nexthdr = &ipv6_hdr(skb)->nexthdr;
index 2712ab22a174087c09cc705e1f6adec2bd601154..b3b5730b48c5c297e1d425fc1d9654a8e9fd8bfa 100644 (file)
@@ -693,7 +693,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
        const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
-       u32 ndoptlen = skb->tail - (skb->transport_header +
+       u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
                                    offsetof(struct nd_msg, opt));
        struct ndisc_options ndopts;
        struct net_device *dev = skb->dev;
@@ -853,7 +853,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
        const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
-       u32 ndoptlen = skb->tail - (skb->transport_header +
+       u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
                                    offsetof(struct nd_msg, opt));
        struct ndisc_options ndopts;
        struct net_device *dev = skb->dev;
@@ -1069,7 +1069,8 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 
        __u8 * opt = (__u8 *)(ra_msg + 1);
 
-       optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);
+       optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
+               sizeof(struct ra_msg);
 
        if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
                ND_PRINTK(2, warn, "RA: source address is not link-local\n");
@@ -1346,7 +1347,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
        u8 *hdr;
        struct ndisc_options ndopts;
        struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
-       u32 ndoptlen = skb->tail - (skb->transport_header +
+       u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
                                    offsetof(struct rd_msg, opt));
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1493,7 +1494,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
         */
 
        if (ha)
-               ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha);
+               ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
 
        /*
         *      build redirect option and copy skb over to the new packet.
@@ -1568,7 +1569,7 @@ int ndisc_rcv(struct sk_buff *skb)
 
 static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct inet6_dev *idev;
 
index 72836f40b73075430f4ef76e0fb7d20c32b7401c..95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/export.h>
+#include <net/addrconf.h>
 #include <net/dst.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -186,6 +187,10 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 };
 
+static const struct nf_ipv6_ops ipv6ops = {
+       .chk_addr       = ipv6_chk_addr,
+};
+
 static const struct nf_afinfo nf_ip6_afinfo = {
        .family                 = AF_INET6,
        .checksum               = nf_ip6_checksum,
@@ -198,6 +203,7 @@ static const struct nf_afinfo nf_ip6_afinfo = {
 
 int __init ipv6_netfilter_init(void)
 {
+       RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
        return nf_register_afinfo(&nf_ip6_afinfo);
 }
 
@@ -206,5 +212,6 @@ int __init ipv6_netfilter_init(void)
  */
 void ipv6_netfilter_fini(void)
 {
+       RCU_INIT_POINTER(nf_ipv6_ops, NULL);
        nf_unregister_afinfo(&nf_ip6_afinfo);
 }
index 60e9053bab051abb1e69aadabcf36f39292d8e03..47bff610751922ebd80b21eb59387a334af9b3fb 100644 (file)
@@ -71,7 +71,7 @@ static int device_cmp(struct nf_conn *ct, void *ifindex)
 static int masq_device_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
 {
-       const struct net_device *dev = ptr;
+       const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_DOWN)
@@ -89,8 +89,10 @@ static int masq_inet_event(struct notifier_block *this,
                           unsigned long event, void *ptr)
 {
        struct inet6_ifaddr *ifa = ptr;
+       struct netdev_notifier_info info;
 
-       return masq_device_event(this, event, ifa->idev->dev);
+       netdev_notifier_info_init(&info, ifa->idev->dev);
+       return masq_device_event(this, event, &info);
 }
 
 static struct notifier_block masq_inet_notifier = {
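The notifier conversions in this merge (ip6mr_device_event(), ndisc_netdev_event(), masq_device_event() and friends) all follow the same new calling convention: the void *ptr handed to a netdevice notifier is no longer the struct net_device itself but a struct netdev_notifier_info, and code that invokes such a callback directly must wrap the device with netdev_notifier_info_init(), as masq_inet_event() does above. A minimal sketch of a callback written against the new convention (the callback and block names are made up for illustration):

#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Sketch only: a netdevice notifier using the new accessor instead of
 * casting ptr to a struct net_device directly.
 */
static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_DOWN)
		pr_debug("%s: going down\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* Registered from module init with register_netdevice_notifier(). */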
index c2e73e647e440745acd32dcaa1564031f4323e38..ab92a3673fbbddd9f3d32b69730c3b1b61fbcbde 100644 (file)
@@ -40,7 +40,8 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
        u16 offset = sizeof(struct ipv6hdr);
        struct ipv6_opt_hdr *exthdr =
                                (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
-       unsigned int packet_len = skb->tail - skb->network_header;
+       unsigned int packet_len = skb_tail_pointer(skb) -
+               skb_network_header(skb);
        int found_rhdr = 0;
        *nexthdr = &ipv6_hdr(skb)->nexthdr;
 
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
new file mode 100644 (file)
index 0000000..2b52046
--- /dev/null
+++ b/net/ipv6/ping.c
@@ -0,0 +1,272 @@
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             "Ping" sockets
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * Based on ipv4/ping.c code.
+ *
+ * Authors:    Lorenzo Colitti (IPv6 support)
+ *             Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6),
+ *             Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32)
+ *
+ */
+
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/transp_v6.h>
+#include <net/ping.h>
+
+struct proto pingv6_prot = {
+       .name =         "PINGv6",
+       .owner =        THIS_MODULE,
+       .init =         ping_init_sock,
+       .close =        ping_close,
+       .connect =      ip6_datagram_connect,
+       .disconnect =   udp_disconnect,
+       .setsockopt =   ipv6_setsockopt,
+       .getsockopt =   ipv6_getsockopt,
+       .sendmsg =      ping_v6_sendmsg,
+       .recvmsg =      ping_recvmsg,
+       .bind =         ping_bind,
+       .backlog_rcv =  ping_queue_rcv_skb,
+       .hash =         ping_hash,
+       .unhash =       ping_unhash,
+       .get_port =     ping_get_port,
+       .obj_size =     sizeof(struct raw6_sock),
+};
+EXPORT_SYMBOL_GPL(pingv6_prot);
+
+static struct inet_protosw pingv6_protosw = {
+       .type =      SOCK_DGRAM,
+       .protocol =  IPPROTO_ICMPV6,
+       .prot =      &pingv6_prot,
+       .ops =       &inet6_dgram_ops,
+       .no_check =  UDP_CSUM_DEFAULT,
+       .flags =     INET_PROTOSW_REUSE,
+};
+
+
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+{
+       return -EAFNOSUPPORT;
+}
+static int dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                                      struct sk_buff *skb)
+{
+       return -EAFNOSUPPORT;
+}
+static int dummy_icmpv6_err_convert(u8 type, u8 code, int *err)
+{
+       return -EAFNOSUPPORT;
+}
+static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+                                 __be16 port, u32 info, u8 *payload) {}
+static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+                              const struct net_device *dev, int strict)
+{
+       return 0;
+}
+
+int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                   size_t len)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct icmp6hdr user_icmph;
+       int addr_type;
+       struct in6_addr *daddr;
+       int iif = 0;
+       struct flowi6 fl6;
+       int err;
+       int hlimit;
+       struct dst_entry *dst;
+       struct rt6_info *rt;
+       struct pingfakehdr pfh;
+
+       pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
+
+       err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph,
+                                 sizeof(user_icmph));
+       if (err)
+               return err;
+
+       if (msg->msg_name) {
+               struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
+               if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
+                   u->sin6_family != AF_INET6) {
+                       return -EINVAL;
+               }
+               if (sk->sk_bound_dev_if &&
+                   sk->sk_bound_dev_if != u->sin6_scope_id) {
+                       return -EINVAL;
+               }
+               daddr = &(u->sin6_addr);
+               iif = u->sin6_scope_id;
+       } else {
+               if (sk->sk_state != TCP_ESTABLISHED)
+                       return -EDESTADDRREQ;
+               daddr = &np->daddr;
+       }
+
+       if (!iif)
+               iif = sk->sk_bound_dev_if;
+
+       addr_type = ipv6_addr_type(daddr);
+       if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
+               return -EINVAL;
+       if (addr_type & IPV6_ADDR_MAPPED)
+               return -EINVAL;
+
+       /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
+
+       memset(&fl6, 0, sizeof(fl6));
+
+       fl6.flowi6_proto = IPPROTO_ICMPV6;
+       fl6.saddr = np->saddr;
+       fl6.daddr = *daddr;
+       fl6.fl6_icmp_type = user_icmph.icmp6_type;
+       fl6.fl6_icmp_code = user_icmph.icmp6_code;
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+               fl6.flowi6_oif = np->mcast_oif;
+       else if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->ucast_oif;
+
+       dst = ip6_sk_dst_lookup_flow(sk, &fl6,  daddr, 1);
+       if (IS_ERR(dst))
+               return PTR_ERR(dst);
+       rt = (struct rt6_info *) dst;
+
+       np = inet6_sk(sk);
+       if (!np)
+               return -EBADF;
+
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+               fl6.flowi6_oif = np->mcast_oif;
+       else if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->ucast_oif;
+
+       pfh.icmph.type = user_icmph.icmp6_type;
+       pfh.icmph.code = user_icmph.icmp6_code;
+       pfh.icmph.checksum = 0;
+       pfh.icmph.un.echo.id = inet->inet_sport;
+       pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
+       pfh.iov = msg->msg_iov;
+       pfh.wcheck = 0;
+       pfh.family = AF_INET6;
+
+       if (ipv6_addr_is_multicast(&fl6.daddr))
+               hlimit = np->mcast_hops;
+       else
+               hlimit = np->hop_limit;
+       if (hlimit < 0)
+               hlimit = ip6_dst_hoplimit(dst);
+
+       err = ip6_append_data(sk, ping_getfrag, &pfh, len,
+                             0, hlimit,
+                             np->tclass, NULL, &fl6, rt,
+                             MSG_DONTWAIT, np->dontfrag);
+
+       if (err) {
+               ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
+                                  ICMP6_MIB_OUTERRORS);
+               ip6_flush_pending_frames(sk);
+       } else {
+               err = icmpv6_push_pending_frames(sk, &fl6,
+                                                (struct icmp6hdr *) &pfh.icmph,
+                                                len);
+       }
+
+       return err;
+}
+
+#ifdef CONFIG_PROC_FS
+static void *ping_v6_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       return ping_seq_start(seq, pos, AF_INET6);
+}
+
+static int ping_v6_seq_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
+       } else {
+               int bucket = ((struct ping_iter_state *) seq->private)->bucket;
+               struct inet_sock *inet = inet_sk(v);
+               __u16 srcp = ntohs(inet->inet_sport);
+               __u16 destp = ntohs(inet->inet_dport);
+               ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
+       }
+       return 0;
+}
+
+static struct ping_seq_afinfo ping_v6_seq_afinfo = {
+       .name           = "icmp6",
+       .family         = AF_INET6,
+       .seq_fops       = &ping_seq_fops,
+       .seq_ops        = {
+               .start          = ping_v6_seq_start,
+               .show           = ping_v6_seq_show,
+               .next           = ping_seq_next,
+               .stop           = ping_seq_stop,
+       },
+};
+
+static int __net_init ping_v6_proc_init_net(struct net *net)
+{
+       return ping_proc_register(net, &ping_v6_seq_afinfo);
+}
+
+static void __net_init ping_v6_proc_exit_net(struct net *net)
+{
+       return ping_proc_unregister(net, &ping_v6_seq_afinfo);
+}
+
+static struct pernet_operations ping_v6_net_ops = {
+       .init = ping_v6_proc_init_net,
+       .exit = ping_v6_proc_exit_net,
+};
+#endif
+
+int __init pingv6_init(void)
+{
+#ifdef CONFIG_PROC_FS
+       int ret = register_pernet_subsys(&ping_v6_net_ops);
+       if (ret)
+               return ret;
+#endif
+       pingv6_ops.ipv6_recv_error = ipv6_recv_error;
+       pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
+       pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
+       pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
+       pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
+       return inet6_register_protosw(&pingv6_protosw);
+}
+
+/* This never gets called because it's not possible to unload the ipv6 module,
+ * but just in case.
+ */
+void pingv6_exit(void)
+{
+       pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
+       pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
+       pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
+       pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
+       pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
+#ifdef CONFIG_PROC_FS
+       unregister_pernet_subsys(&ping_v6_net_ops);
+#endif
+       inet6_unregister_protosw(&pingv6_protosw);
+}
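With net/ipv6/ping.c in place, ICMPv6 echo sockets become available to userspace in the same way as their IPv4 counterparts: an unprivileged SOCK_DGRAM/IPPROTO_ICMPV6 socket, gated by the same ping_group_range sysctl (pingv6_prot reuses ping_init_sock above). A hedged smoke test, assuming the sysctl admits the caller's group, might look like this:

#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Hypothetical example: send one ICMPv6 echo request to ::1 through the
 * new ping socket type.  The kernel fills in the echo id from the socket's
 * local port and computes the checksum, so only type/code/sequence matter.
 */
int main(void)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_addr = IN6ADDR_LOOPBACK_INIT };
	struct icmp6_hdr req;
	int fd;

	fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);
	if (fd < 0) {
		perror("socket");	/* EACCES if ping_group_range excludes us */
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.icmp6_type = ICMP6_ECHO_REQUEST;
	req.icmp6_seq = 1;

	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}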
index f3c1ff4357ff1ac91f5e69c704e03b1e83c0192d..51c3285b5d9b582cb1d6101d3fafc70c781f5a74 100644 (file)
@@ -90,7 +90,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
        SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
        SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
        SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
-       SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+       /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
        SNMP_MIB_SENTINEL
 };
 
index eedff8ccded507cc977bd073dbbf334b2624033b..c45f7a5c36e96f98487ca194c5f08e5d0f931852 100644 (file)
@@ -1132,7 +1132,8 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL)
-                       amount = skb->tail - skb->transport_header;
+                       amount = skb_tail_pointer(skb) -
+                               skb_transport_header(skb);
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
        }
@@ -1226,45 +1227,16 @@ struct proto rawv6_prot = {
 };
 
 #ifdef CONFIG_PROC_FS
-static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
-{
-       struct ipv6_pinfo *np = inet6_sk(sp);
-       const struct in6_addr *dest, *src;
-       __u16 destp, srcp;
-
-       dest  = &np->daddr;
-       src   = &np->rcv_saddr;
-       destp = 0;
-       srcp  = inet_sk(sp)->inet_num;
-       seq_printf(seq,
-                  "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
-                  i,
-                  src->s6_addr32[0], src->s6_addr32[1],
-                  src->s6_addr32[2], src->s6_addr32[3], srcp,
-                  dest->s6_addr32[0], dest->s6_addr32[1],
-                  dest->s6_addr32[2], dest->s6_addr32[3], destp,
-                  sp->sk_state,
-                  sk_wmem_alloc_get(sp),
-                  sk_rmem_alloc_get(sp),
-                  0, 0L, 0,
-                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
-                  0,
-                  sock_i_ino(sp),
-                  atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
-}
-
 static int raw6_seq_show(struct seq_file *seq, void *v)
 {
-       if (v == SEQ_START_TOKEN)
-               seq_printf(seq,
-                          "  sl  "
-                          "local_address                         "
-                          "remote_address                        "
-                          "st tx_queue rx_queue tr tm->when retrnsmt"
-                          "   uid  timeout inode ref pointer drops\n");
-       else
-               raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
+       } else {
+               struct sock *sp = v;
+               __u16 srcp  = inet_sk(sp)->inet_num;
+               ip6_dgram_sock_seq_show(seq, v, srcp, 0,
+                                       raw_seq_private(seq)->bucket);
+       }
        return 0;
 }
 
index ad0aa6b0b86ae02f80b6b2184588605a3d5d7a6c..7ca87b37c0efe0d103c4888777bfc1dbbaeb2ec8 100644 (file)
@@ -1649,7 +1649,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
        int optlen, on_link;
        u8 *lladdr;
 
-       optlen = skb->tail - skb->transport_header;
+       optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
        optlen -= sizeof(*msg);
 
        if (optlen < 0) {
@@ -2681,9 +2681,9 @@ errout:
 }
 
 static int ip6_route_dev_notify(struct notifier_block *this,
-                               unsigned long event, void *data)
+                               unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
@@ -2790,7 +2790,7 @@ static const struct file_operations rt6_stats_seq_fops = {
 #ifdef CONFIG_SYSCTL
 
 static
-int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
+int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct net *net;
@@ -2805,7 +2805,7 @@ int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
        return 0;
 }
 
-ctl_table ipv6_route_table_template[] = {
+struct ctl_table ipv6_route_table_template[] = {
        {
                .procname       =       "flush",
                .data           =       &init_net.ipv6.sysctl.flush_delay,
index 335363478bbfa037e5a4ddc8c3020cc4eeae0a15..97a0bfe2c293c8eac296dc19d022d193df53cd28 100644 (file)
@@ -466,14 +466,14 @@ isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
 
 static void ipip6_tunnel_uninit(struct net_device *dev)
 {
-       struct net *net = dev_net(dev);
-       struct sit_net *sitn = net_generic(net, sit_net_id);
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       struct sit_net *sitn = net_generic(tunnel->net, sit_net_id);
 
        if (dev == sitn->fb_tunnel_dev) {
                RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
        } else {
-               ipip6_tunnel_unlink(sitn, netdev_priv(dev));
-               ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
+               ipip6_tunnel_unlink(sitn, tunnel);
+               ipip6_tunnel_del_prl(tunnel, NULL);
        }
        dev_put(dev);
 }
@@ -577,6 +577,10 @@ static int ipip6_rcv(struct sk_buff *skb)
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
 
+               if (tunnel->parms.iph.protocol != IPPROTO_IPV6 &&
+                   tunnel->parms.iph.protocol != 0)
+                       goto out;
+
                secpath_reset(skb);
                skb->mac_header = skb->network_header;
                skb_reset_network_header(skb);
@@ -617,6 +621,8 @@ static int ipip6_rcv(struct sk_buff *skb)
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
 
+               if (tunnel->net != dev_net(tunnel->dev))
+                       skb_scrub_packet(skb);
                netif_rx(skb);
 
                return 0;
@@ -629,6 +635,40 @@ out:
        return 0;
 }
 
+static const struct tnl_ptk_info tpi = {
+       /* no tunnel info required for ipip. */
+       .proto = htons(ETH_P_IP),
+};
+
+static int ipip_rcv(struct sk_buff *skb)
+{
+       const struct iphdr *iph;
+       struct ip_tunnel *tunnel;
+
+       if (iptunnel_pull_header(skb, 0, tpi.proto))
+               goto drop;
+
+       iph = ip_hdr(skb);
+
+       tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
+                                    iph->saddr, iph->daddr);
+       if (tunnel != NULL) {
+               if (tunnel->parms.iph.protocol != IPPROTO_IPIP &&
+                   tunnel->parms.iph.protocol != 0)
+                       goto drop;
+
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+               return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+       }
+
+       return 1;
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
 /*
  * If the IPv6 address comes from 6rd / 6to4 (RFC 3056) addr space this function
  * stores the embedded IPv4 address in v4dst and returns true.
@@ -690,13 +730,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
        __be16 df = tiph->frag_off;
        struct rtable *rt;                      /* Route to the other host */
        struct net_device *tdev;                /* Device to other host */
-       struct iphdr  *iph;                     /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        __be32 dst = tiph->daddr;
        struct flowi4 fl4;
        int    mtu;
        const struct in6_addr *addr6;
        int addr_type;
+       u8 ttl;
+       int err;
 
        if (skb->protocol != htons(ETH_P_IPV6))
                goto tx_error;
@@ -764,7 +805,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        goto tx_error;
        }
 
-       rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
+       rt = ip_route_output_ports(tunnel->net, &fl4, NULL,
                                   dst, tiph->saddr,
                                   0, 0,
                                   IPPROTO_IPV6, RT_TOS(tos),
@@ -819,6 +860,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        tunnel->err_count = 0;
        }
 
+       if (tunnel->net != dev_net(dev))
+               skb_scrub_packet(skb);
+
        /*
         * Okay, now see if we can stuff it in the buffer as-is.
         */
@@ -839,34 +883,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                skb = new_skb;
                iph6 = ipv6_hdr(skb);
        }
-
-       skb->transport_header = skb->network_header;
-       skb_push(skb, sizeof(struct iphdr));
-       skb_reset_network_header(skb);
-       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-       IPCB(skb)->flags = 0;
-       skb_dst_drop(skb);
-       skb_dst_set(skb, &rt->dst);
-
-       /*
-        *      Push down and install the IPIP header.
-        */
-
-       iph                     =       ip_hdr(skb);
-       iph->version            =       4;
-       iph->ihl                =       sizeof(struct iphdr)>>2;
-       iph->frag_off           =       df;
-       iph->protocol           =       IPPROTO_IPV6;
-       iph->tos                =       INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
-       iph->daddr              =       fl4.daddr;
-       iph->saddr              =       fl4.saddr;
-
-       if ((iph->ttl = tiph->ttl) == 0)
-               iph->ttl        =       iph6->hop_limit;
-
-       skb->ip_summed = CHECKSUM_NONE;
-       ip_select_ident(iph, skb_dst(skb), NULL);
-       iptunnel_xmit(skb, dev);
+       ttl = tiph->ttl;
+       if (ttl == 0)
+               ttl = iph6->hop_limit;
+       tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
+
+       err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
+                           IPPROTO_IPV6, tos, ttl, df);
+       iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
        return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -877,6 +901,43 @@ tx_error:
        return NETDEV_TX_OK;
 }
 
+static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       const struct iphdr  *tiph = &tunnel->parms.iph;
+
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
+       ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
+       return NETDEV_TX_OK;
+}
+
+static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
+                                  struct net_device *dev)
+{
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               ipip_tunnel_xmit(skb, dev);
+               break;
+       case htons(ETH_P_IPV6):
+               ipip6_tunnel_xmit(skb, dev);
+               break;
+       default:
+               goto tx_err;
+       }
+
+       return NETDEV_TX_OK;
+
+tx_err:
+       dev->stats.tx_errors++;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+
+}
+
 static void ipip6_tunnel_bind_dev(struct net_device *dev)
 {
        struct net_device *tdev = NULL;
@@ -888,7 +949,8 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
        iph = &tunnel->parms.iph;
 
        if (iph->daddr) {
-               struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
+               struct rtable *rt = ip_route_output_ports(tunnel->net, &fl4,
+                                                         NULL,
                                                          iph->daddr, iph->saddr,
                                                          0, 0,
                                                          IPPROTO_IPV6,
@@ -903,7 +965,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
        }
 
        if (!tdev && tunnel->parms.link)
-               tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+               tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
 
        if (tdev) {
                dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
@@ -916,7 +978,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 
 static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
 {
-       struct net *net = dev_net(t->dev);
+       struct net *net = t->net;
        struct sit_net *sitn = net_generic(net, sit_net_id);
 
        ipip6_tunnel_unlink(sitn, t);
@@ -1027,7 +1089,11 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                        goto done;
 
                err = -EINVAL;
-               if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
+               if (p.iph.protocol != IPPROTO_IPV6 &&
+                   p.iph.protocol != IPPROTO_IPIP &&
+                   p.iph.protocol != 0)
+                       goto done;
+               if (p.iph.version != 4 ||
                    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
                        goto done;
                if (p.iph.ttl)
@@ -1164,7 +1230,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 
 static const struct net_device_ops ipip6_netdev_ops = {
        .ndo_uninit     = ipip6_tunnel_uninit,
-       .ndo_start_xmit = ipip6_tunnel_xmit,
+       .ndo_start_xmit = sit_tunnel_xmit,
        .ndo_do_ioctl   = ipip6_tunnel_ioctl,
        .ndo_change_mtu = ipip6_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
@@ -1188,7 +1254,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        dev->priv_flags        &= ~IFF_XMIT_DST_RELEASE;
        dev->iflink             = 0;
        dev->addr_len           = 4;
-       dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->features           |= NETIF_F_LLTX;
 }
 
@@ -1197,6 +1262,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
+       tunnel->net = dev_net(dev);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1217,6 +1283,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        struct sit_net *sitn = net_generic(net, sit_net_id);
 
        tunnel->dev = dev;
+       tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
 
        iph->version            = 4;
@@ -1232,6 +1299,22 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        return 0;
 }
 
+static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       u8 proto;
+
+       if (!data || !data[IFLA_IPTUN_PROTO])
+               return 0;
+
+       proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+       if (proto != IPPROTO_IPV6 &&
+           proto != IPPROTO_IPIP &&
+           proto != 0)
+               return -EINVAL;
+
+       return 0;
+}
+
 static void ipip6_netlink_parms(struct nlattr *data[],
                                struct ip_tunnel_parm *parms)
 {
@@ -1268,6 +1351,10 @@ static void ipip6_netlink_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_FLAGS])
                parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
+
+       if (data[IFLA_IPTUN_PROTO])
+               parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+
 }
 
 #ifdef CONFIG_IPV6_SIT_6RD
@@ -1391,6 +1478,8 @@ static size_t ipip6_get_size(const struct net_device *dev)
                nla_total_size(1) +
                /* IFLA_IPTUN_FLAGS */
                nla_total_size(2) +
+               /* IFLA_IPTUN_PROTO */
+               nla_total_size(1) +
 #ifdef CONFIG_IPV6_SIT_6RD
                /* IFLA_IPTUN_6RD_PREFIX */
                nla_total_size(sizeof(struct in6_addr)) +
@@ -1416,6 +1505,7 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
            nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
                       !!(parm->iph.frag_off & htons(IP_DF))) ||
+           nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
            nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags))
                goto nla_put_failure;
 
@@ -1445,6 +1535,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
        [IFLA_IPTUN_TOS]                = { .type = NLA_U8 },
        [IFLA_IPTUN_PMTUDISC]           = { .type = NLA_U8 },
        [IFLA_IPTUN_FLAGS]              = { .type = NLA_U16 },
+       [IFLA_IPTUN_PROTO]              = { .type = NLA_U8 },
 #ifdef CONFIG_IPV6_SIT_6RD
        [IFLA_IPTUN_6RD_PREFIX]         = { .len = sizeof(struct in6_addr) },
        [IFLA_IPTUN_6RD_RELAY_PREFIX]   = { .type = NLA_U32 },
@@ -1459,6 +1550,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = {
        .policy         = ipip6_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = ipip6_tunnel_setup,
+       .validate       = ipip6_validate,
        .newlink        = ipip6_newlink,
        .changelink     = ipip6_changelink,
        .get_size       = ipip6_get_size,
@@ -1471,10 +1563,22 @@ static struct xfrm_tunnel sit_handler __read_mostly = {
        .priority       =       1,
 };
 
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+       .handler        =       ipip_rcv,
+       .err_handler    =       ipip6_err,
+       .priority       =       2,
+};
+
 static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
 {
+       struct net *net = dev_net(sitn->fb_tunnel_dev);
+       struct net_device *dev, *aux;
        int prio;
 
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &sit_link_ops)
+                       unregister_netdevice_queue(dev, head);
+
        for (prio = 1; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
@@ -1482,7 +1586,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
 
                        t = rtnl_dereference(sitn->tunnels[prio][h]);
                        while (t != NULL) {
-                               unregister_netdevice_queue(t->dev, head);
+                               /* If dev is in the same netns, it has already
+                                * been added to the list by the previous loop.
+                                */
+                               if (dev_net(t->dev) != net)
+                                       unregister_netdevice_queue(t->dev,
+                                                                  head);
                                t = rtnl_dereference(t->next);
                        }
                }
@@ -1507,6 +1616,10 @@ static int __net_init sit_init_net(struct net *net)
                goto err_alloc_dev;
        }
        dev_net_set(sitn->fb_tunnel_dev, net);
+       /* FB netdevice is special: we have one, and only one per netns.
+        * Allowing to move it to another netns is clearly unsafe.
+        */
+       sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
 
        err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
        if (err)
@@ -1553,6 +1666,7 @@ static void __exit sit_cleanup(void)
 {
        rtnl_link_unregister(&sit_link_ops);
        xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
+       xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 
        unregister_pernet_device(&sit_net_ops);
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
@@ -1569,9 +1683,14 @@ static int __init sit_init(void)
                return err;
        err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
        if (err < 0) {
-               pr_info("%s: can't add protocol\n", __func__);
+               pr_info("%s: can't register ip6ip4\n", __func__);
                goto xfrm_tunnel_failed;
        }
+       err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+       if (err < 0) {
+               pr_info("%s: can't register ip4ip4\n", __func__);
+               goto xfrm_tunnel4_failed;
+       }
        err = rtnl_link_register(&sit_link_ops);
        if (err < 0)
                goto rtnl_link_failed;
@@ -1580,6 +1699,8 @@ out:
        return err;
 
 rtnl_link_failed:
+       xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel4_failed:
        xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
 xfrm_tunnel_failed:
        unregister_pernet_device(&sit_net_ops);
index e85c48bd404f4036b0c4e7db1e29bd6a92192f75..107b2f1d90ae8ff5e092c8306215c21f82350314 100644 (file)
@@ -16,7 +16,7 @@
 #include <net/addrconf.h>
 #include <net/inet_frag.h>
 
-static ctl_table ipv6_table_template[] = {
+static struct ctl_table ipv6_table_template[] = {
        {
                .procname       = "bindv6only",
                .data           = &init_net.ipv6.sysctl.bindv6only,
@@ -27,7 +27,7 @@ static ctl_table ipv6_table_template[] = {
        { }
 };
 
-static ctl_table ipv6_rotable[] = {
+static struct ctl_table ipv6_rotable[] = {
        {
                .procname       = "mld_max_msf",
                .data           = &sysctl_mld_max_msf,
index 71167069b394e741101d33486cb8bd1e2680c68f..5cffa5c3e6b810a39a649c99753d0b68f54f07fd 100644 (file)
@@ -63,6 +63,7 @@
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
+#include <net/ll_poll.h>
 
 #include <asm/uaccess.h>
 
@@ -1498,6 +1499,7 @@ process:
        if (sk_filter(sk, skb))
                goto discard_and_relse;
 
+       sk_mark_ll(sk, skb);
        skb->dev = NULL;
 
        bh_lock_sock_nested(sk);
@@ -1890,6 +1892,17 @@ void tcp6_proc_exit(struct net *net)
 }
 #endif
 
+static void tcp_v6_clear_sk(struct sock *sk, int size)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       /* we do not want to clear pinet6 field, because of RCU lookups */
+       sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+       size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+       memset(&inet->pinet6 + 1, 0, size);
+}
+
 struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
@@ -1933,6 +1946,7 @@ struct proto tcpv6_prot = {
 #ifdef CONFIG_MEMCG_KMEM
        .proto_cgroup           = tcp_proto_cgroup,
 #endif
+       .clear_sk               = tcp_v6_clear_sk,
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
index d4defdd449372604c21f2d3c353c20339e920d5c..f77e34c5a0e20f9639659b11814662a3547d25ea 100644 (file)
@@ -46,6 +46,7 @@
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 #include <net/inet6_hashtables.h>
+#include <net/ll_poll.h>
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -841,7 +842,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
         */
        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
        if (sk != NULL) {
-               int ret = udpv6_queue_rcv_skb(sk, skb);
+               int ret;
+
+               sk_mark_ll(sk, skb);
+               ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
                /* a return value > 0 means to resubmit the input, but
@@ -1359,48 +1363,17 @@ static const struct inet6_protocol udpv6_protocol = {
 
 /* ------------------------------------------------------------------------ */
 #ifdef CONFIG_PROC_FS
-
-static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
-{
-       struct inet_sock *inet = inet_sk(sp);
-       struct ipv6_pinfo *np = inet6_sk(sp);
-       const struct in6_addr *dest, *src;
-       __u16 destp, srcp;
-
-       dest  = &np->daddr;
-       src   = &np->rcv_saddr;
-       destp = ntohs(inet->inet_dport);
-       srcp  = ntohs(inet->inet_sport);
-       seq_printf(seq,
-                  "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
-                  bucket,
-                  src->s6_addr32[0], src->s6_addr32[1],
-                  src->s6_addr32[2], src->s6_addr32[3], srcp,
-                  dest->s6_addr32[0], dest->s6_addr32[1],
-                  dest->s6_addr32[2], dest->s6_addr32[3], destp,
-                  sp->sk_state,
-                  sk_wmem_alloc_get(sp),
-                  sk_rmem_alloc_get(sp),
-                  0, 0L, 0,
-                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
-                  0,
-                  sock_i_ino(sp),
-                  atomic_read(&sp->sk_refcnt), sp,
-                  atomic_read(&sp->sk_drops));
-}
-
 int udp6_seq_show(struct seq_file *seq, void *v)
 {
-       if (v == SEQ_START_TOKEN)
-               seq_printf(seq,
-                          "  sl  "
-                          "local_address                         "
-                          "remote_address                        "
-                          "st tx_queue rx_queue tr tm->when retrnsmt"
-                          "   uid  timeout inode ref pointer drops\n");
-       else
-               udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
+       } else {
+               int bucket = ((struct udp_iter_state *)seq->private)->bucket;
+               struct inet_sock *inet = inet_sk(v);
+               __u16 srcp = ntohs(inet->inet_sport);
+               __u16 destp = ntohs(inet->inet_dport);
+               ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
+       }
        return 0;
 }
 
@@ -1432,6 +1405,17 @@ void udp6_proc_exit(struct net *net) {
 }
 #endif /* CONFIG_PROC_FS */
 
+void udp_v6_clear_sk(struct sock *sk, int size)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       /* we do not want to clear pinet6 field, because of RCU lookups */
+       sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+       size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+       memset(&inet->pinet6 + 1, 0, size);
+}
+
 /* ------------------------------------------------------------------------ */
 
 struct proto udpv6_prot = {
@@ -1462,7 +1446,7 @@ struct proto udpv6_prot = {
        .compat_setsockopt = compat_udpv6_setsockopt,
        .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
-       .clear_sk          = sk_prot_clear_portaddr_nulls,
+       .clear_sk          = udp_v6_clear_sk,
 };
 
 static struct inet_protosw udpv6_protosw = {
index d7571046bfc440386a52b71bdb2c5295e4360001..4691ed50a9282a108e43ce8e7de56d79c1635c4a 100644 (file)
@@ -31,6 +31,8 @@ extern int    udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 extern int     udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
 extern void    udpv6_destroy_sock(struct sock *sk);
 
+extern void udp_v6_clear_sk(struct sock *sk, int size);
+
 #ifdef CONFIG_PROC_FS
 extern int     udp6_seq_show(struct seq_file *seq, void *v);
 #endif
index 3bb3a891a42416b23ddb278d3fd7c051d25cfcf7..5d1b8d7ac9931c7d0cbe35c06318d08659a5af7b 100644 (file)
@@ -46,11 +46,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        unsigned int mss;
        unsigned int unfrag_ip6hlen, unfrag_len;
        struct frag_hdr *fptr;
-       u8 *mac_start, *prevhdr;
+       u8 *packet_start, *prevhdr;
        u8 nexthdr;
        u8 frag_hdr_sz = sizeof(struct frag_hdr);
        int offset;
        __wsum csum;
+       int tnl_hlen;
 
        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
@@ -63,7 +64,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                if (unlikely(type & ~(SKB_GSO_UDP |
                                      SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
-                                     SKB_GSO_GRE) ||
+                                     SKB_GSO_GRE |
+                                     SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
 
@@ -83,9 +85,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Check if there is enough headroom to insert fragment header. */
-       if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
-           pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
-               goto out;
+       tnl_hlen = skb_tnl_header_len(skb);
+       if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+               if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+                       goto out;
+       }
 
        /* Find the unfragmentable header and shift it left by frag_hdr_sz
         * bytes to insert fragment header.
@@ -93,11 +97,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;
        *prevhdr = NEXTHDR_FRAGMENT;
-       unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
-                    unfrag_ip6hlen;
-       mac_start = skb_mac_header(skb);
-       memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
+       unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+                    unfrag_ip6hlen + tnl_hlen;
+       packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+       memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
 
+       SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
        skb->mac_header -= frag_hdr_sz;
        skb->network_header -= frag_hdr_sz;
 
index 1d08e21d9f6993e92a4a16b589784873aaf63fd8..dfcc4be46898281f09038bbd7638edfefa04ea3e 100644 (file)
@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
        .compat_setsockopt = compat_udpv6_setsockopt,
        .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
-       .clear_sk          = sk_prot_clear_portaddr_nulls,
+       .clear_sk          = udp_v6_clear_sk,
 };
 
 static struct inet_protosw udplite6_protosw = {
index 4ef7bdb65440ca965561d4ca6fdd10b4d97f97d6..23ed03d786c8376cc59f9fa2cf577ee01a4c2c2d 100644 (file)
@@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        dev_hold(dev);
 
        xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
-       if (!xdst->u.rt6.rt6i_idev)
+       if (!xdst->u.rt6.rt6i_idev) {
+               dev_put(dev);
                return -ENODEV;
+       }
 
        rt6_transfer_peer(&xdst->u.rt6, rt);
 
index f547a47d381ca0596244bfbf692ef8134b508ccc..7a1e0fc1bd4dd2ca8f31d8a376d94a30406cb1e1 100644 (file)
@@ -330,7 +330,7 @@ static __inline__ void __ipxitf_put(struct ipx_interface *intrfc)
 static int ipxitf_device_event(struct notifier_block *notifier,
                                unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ipx_interface *i, *tmp;
 
        if (!net_eq(dev_net(dev), &init_net))
index 8c004161a843a2e33a963ddd254c4a0e5a129969..9ea0c933b9ff8803c071367bfef0df210090a90f 100644 (file)
@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
                /*
                 *  We now have some discovery info to deliver!
                 */
-               discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
+               discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
                if (!discovery) {
                        IRDA_WARNING("%s: unable to malloc!\n", __func__);
                        return;
index de73f6496db5a4842c54e1c5a9013e01f39a8988..d6a59651767a89ca8a2a485665c0040a7ff0c641 100644 (file)
@@ -73,7 +73,7 @@ static int min_lap_keepalive_time = 100;      /* 100us */
 /* For other sysctl, I've no idea of the range. Maybe Dag could help
  * us on that - Jean II */
 
-static int do_devname(ctl_table *table, int write,
+static int do_devname(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int ret;
@@ -90,7 +90,7 @@ static int do_devname(ctl_table *table, int write,
 }
 
 
-static int do_discovery(ctl_table *table, int write,
+static int do_discovery(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int ret;
@@ -111,7 +111,7 @@ static int do_discovery(ctl_table *table, int write,
 }
 
 /* One file */
-static ctl_table irda_table[] = {
+static struct ctl_table irda_table[] = {
        {
                .procname       = "discovery",
                .data           = &sysctl_discovery,
index ae691651b72141d649a4cca1aaaedc8920db5192..168aff5e60de528194a1f4b8ea836cf394ea6613 100644 (file)
@@ -2293,7 +2293,7 @@ out_unlock:
 static int afiucv_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
 {
-       struct net_device *event_dev = (struct net_device *)ptr;
+       struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
        struct sock *sk;
        struct iucv_sock *iucv;
 
index 5b1e5af257137e4c6a03a2c575f1adb5a949e25e..c5fbd7589681cf984220325fff463696163340d0 100644 (file)
@@ -2366,6 +2366,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 
 out:
        xfrm_pol_put(xp);
+       if (err == 0)
+               xfrm_garbage_collect(net);
        return err;
 }
 
@@ -2615,6 +2617,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
 out:
        xfrm_pol_put(xp);
+       if (delete && err == 0)
+               xfrm_garbage_collect(net);
        return err;
 }
 
index 637a341c1e2d1a466c68efcb50d7fc201198a15b..8dec6876dc508265084cc80314e69d6b9d25aa25 100644 (file)
@@ -346,19 +346,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
        skb_put(skb, 2);
 
        /* Copy user data into skb */
-       error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+       error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
+                                total_len);
        if (error < 0) {
                kfree_skb(skb);
                goto error_put_sess_tun;
        }
-       skb_put(skb, total_len);
 
        l2tp_xmit_skb(session, skb, session->hdr_len);
 
        sock_put(ps->tunnel_sock);
        sock_put(sk);
 
-       return error;
+       return total_len;
 
 error_put_sess_tun:
        sock_put(ps->tunnel_sock);
index a2a8250e2f845d6441f60e177357900d3d411155..cc117591f678463c2cd9a3eb4060af5e01bf51ca 100644 (file)
@@ -1764,10 +1764,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 }
 
 static int netdev_notify(struct notifier_block *nb,
-                        unsigned long state,
-                        void *ndev)
+                        unsigned long state, void *ptr)
 {
-       struct net_device *dev = ndev;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ieee80211_sub_if_data *sdata;
 
        if (state != NETDEV_CHANGENAME)
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
new file mode 100644 (file)
index 0000000..37421db
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# MPLS configuration
+#
+config NET_MPLS_GSO
+       tristate "MPLS: GSO support"
+       help
+        This is a helper module to allow segmentation of non-MPLS GSO packets
+        that have had MPLS stack entries pushed onto them and thus
+        become MPLS GSO packets.
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
new file mode 100644 (file)
index 0000000..0a3c171
--- /dev/null
@@ -0,0 +1,4 @@
+#
+# Makefile for MPLS.
+#
+obj-y += mpls_gso.o
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
new file mode 100644 (file)
index 0000000..1bec121
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ *     MPLS GSO Support
+ *
+ *     Authors: Simon Horman (horms@verge.net.au)
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ *     Based on: GSO portions of net/ipv4/gre.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
+{
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       netdev_features_t mpls_features;
+       __be16 mpls_protocol;
+
+       if (unlikely(skb_shinfo(skb)->gso_type &
+                               ~(SKB_GSO_TCPV4 |
+                                 SKB_GSO_TCPV6 |
+                                 SKB_GSO_UDP |
+                                 SKB_GSO_DODGY |
+                                 SKB_GSO_TCP_ECN |
+                                 SKB_GSO_GRE |
+                                 SKB_GSO_MPLS)))
+               goto out;
+
+       /* Setup inner SKB. */
+       mpls_protocol = skb->protocol;
+       skb->protocol = skb->inner_protocol;
+
+       /* Push back the mac header that skb_mac_gso_segment() has pulled.
+        * It will be re-pulled by the call to skb_mac_gso_segment() below
+        */
+       __skb_push(skb, skb->mac_len);
+
+       /* Segment inner packet. */
+       mpls_features = skb->dev->mpls_features & netif_skb_features(skb);
+       segs = skb_mac_gso_segment(skb, mpls_features);
+
+
+       /* Restore outer protocol. */
+       skb->protocol = mpls_protocol;
+
+       /* Re-pull the mac header that the call to skb_mac_gso_segment()
+        * above pulled.  It will be re-pushed after returning to
+        * skb_mac_gso_segment(), an indirect caller of this function.
+        */
+       __skb_push(skb, skb->data - skb_mac_header(skb));
+
+out:
+       return segs;
+}
+
+static int mpls_gso_send_check(struct sk_buff *skb)
+{
+       return 0;
+}
+
+static struct packet_offload mpls_mc_offload = {
+       .type = cpu_to_be16(ETH_P_MPLS_MC),
+       .callbacks = {
+               .gso_send_check =       mpls_gso_send_check,
+               .gso_segment    =       mpls_gso_segment,
+       },
+};
+
+static struct packet_offload mpls_uc_offload = {
+       .type = cpu_to_be16(ETH_P_MPLS_UC),
+       .callbacks = {
+               .gso_send_check =       mpls_gso_send_check,
+               .gso_segment    =       mpls_gso_segment,
+       },
+};
+
+static int __init mpls_gso_init(void)
+{
+       pr_info("MPLS GSO support\n");
+
+       dev_add_offload(&mpls_uc_offload);
+       dev_add_offload(&mpls_mc_offload);
+
+       return 0;
+}
+
+static void __exit mpls_gso_exit(void)
+{
+       dev_remove_offload(&mpls_uc_offload);
+       dev_remove_offload(&mpls_mc_offload);
+}
+
+module_init(mpls_gso_init);
+module_exit(mpls_gso_exit);
+
+MODULE_DESCRIPTION("MPLS GSO support");
+MODULE_AUTHOR("Simon Horman (horms@verge.net.au)");
+MODULE_LICENSE("GPL");
index 07c865a31a3d2adba8933c576f4228b0e8be6e55..2217363ab4229212b0f309d1f724e1f23f5ceb17 100644 (file)
@@ -30,6 +30,8 @@ static DEFINE_MUTEX(afinfo_mutex);
 
 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
 EXPORT_SYMBOL(nf_afinfo);
+const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
 {
@@ -302,17 +304,26 @@ static struct pernet_operations netfilter_net_ops = {
        .exit = netfilter_net_exit,
 };
 
-void __init netfilter_init(void)
+int __init netfilter_init(void)
 {
-       int i, h;
+       int i, h, ret;
+
        for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&nf_hooks[i][h]);
        }
 
-       if (register_pernet_subsys(&netfilter_net_ops) < 0)
-               panic("cannot create netfilter proc entry");
+       ret = register_pernet_subsys(&netfilter_net_ops);
+       if (ret < 0)
+               goto err;
+
+       ret = netfilter_log_init();
+       if (ret < 0)
+               goto err_pernet;
 
-       if (netfilter_log_init() < 0)
-               panic("cannot initialize nf_log");
+       return 0;
+err_pernet:
+       unregister_pernet_subsys(&netfilter_net_ops);
+err:
+       return ret;
 }
index a083bda322b6058cf0ba6b65b604bb046c8a79fd..c8c52a98590ba43ffd97f26a7689650384fd95ca 100644 (file)
@@ -975,8 +975,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
                                return cp;
                        }
                }
-               rcu_read_unlock();
-               rcu_read_lock();
+               cond_resched_rcu();
        }
 
        return NULL;
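cond_resched_rcu() lets these long table walks yield the CPU between buckets while each bucket is still traversed under rcu_read_lock(). As a rough approximation (not the kernel's exact, config-dependent definition), it behaves like:

static inline void cond_resched_rcu_sketch(void)
{
	/* leave the read-side critical section, allow rescheduling,
	 * then re-enter it before the next bucket is scanned */
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
}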
@@ -1015,8 +1014,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                        iter->l = &ip_vs_conn_tab[idx];
                        return cp;
                }
-               rcu_read_unlock();
-               rcu_read_lock();
+               cond_resched_rcu();
        }
        iter->l = NULL;
        return NULL;
@@ -1206,17 +1204,13 @@ void ip_vs_random_dropentry(struct net *net)
        int idx;
        struct ip_vs_conn *cp, *cp_c;
 
+       rcu_read_lock();
        /*
         * Randomly scan 1/32 of the whole table every second
         */
        for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
                unsigned int hash = net_random() & ip_vs_conn_tab_mask;
 
-               /*
-                *  Lock is actually needed in this loop.
-                */
-               rcu_read_lock();
-
                hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
                        if (cp->flags & IP_VS_CONN_F_TEMPLATE)
                                /* connection template */
@@ -1252,8 +1246,9 @@ void ip_vs_random_dropentry(struct net *net)
                                __ip_vs_conn_put(cp);
                        }
                }
-               rcu_read_unlock();
+               cond_resched_rcu();
        }
+       rcu_read_unlock();
 }
 
 
@@ -1267,11 +1262,8 @@ static void ip_vs_conn_flush(struct net *net)
        struct netns_ipvs *ipvs = net_ipvs(net);
 
 flush_again:
+       rcu_read_lock();
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-               /*
-                *  Lock is actually needed in this loop.
-                */
-               rcu_read_lock();
 
                hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
                        if (!ip_vs_conn_net_eq(cp, net))
@@ -1286,8 +1278,9 @@ flush_again:
                                __ip_vs_conn_put(cp);
                        }
                }
-               rcu_read_unlock();
+               cond_resched_rcu();
        }
+       rcu_read_unlock();
 
        /* the counter may not be NULL, because some conn entries may still
           be run by the slow timer handler or be unhashed but still referenced */
index 085b5880ab0de4aa90819d9729a13ff5b635cc15..05565d2b3a61b530acad48cfeda90b2af4b3adda 100644 (file)
@@ -1001,6 +1001,32 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
        return th->rst;
 }
 
+static inline bool is_new_conn(const struct sk_buff *skb,
+                              struct ip_vs_iphdr *iph)
+{
+       switch (iph->protocol) {
+       case IPPROTO_TCP: {
+               struct tcphdr _tcph, *th;
+
+               th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+               if (th == NULL)
+                       return false;
+               return th->syn;
+       }
+       case IPPROTO_SCTP: {
+               sctp_chunkhdr_t *sch, schunk;
+
+               sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
+                                        sizeof(schunk), &schunk);
+               if (sch == NULL)
+                       return false;
+               return sch->type == SCTP_CID_INIT;
+       }
+       default:
+               return false;
+       }
+}
+
 /* Handle response packets: rewrite addresses and send away...
  */
 static unsigned int
@@ -1612,6 +1638,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
         * Check if the packet belongs to an existing connection entry
         */
        cp = pp->conn_in_get(af, skb, &iph, 0);
+
+       if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
+           unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
+           is_new_conn(skb, &iph)) {
+               ip_vs_conn_expire_now(cp);
+               __ip_vs_conn_put(cp);
+               cp = NULL;
+       }
+
        if (unlikely(!cp) && !iph.fragoffs) {
                /* No (second) fragments need to enter here, as nf_defrag_ipv6
                 * replayed fragment zero will already have created the cp
index 5b142fb164801bee9ab126d00fa3e485cdf96c60..47e510819f54c737a171b4adb4b241104d36e09d 100644 (file)
@@ -1487,9 +1487,9 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
  * Currently only NETDEV_DOWN is handled to release refs to cached dsts
  */
 static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
-                           void *ptr)
+                          void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_service *svc;
@@ -1575,7 +1575,7 @@ static int zero;
 static int three = 3;
 
 static int
-proc_do_defense_mode(ctl_table *table, int write,
+proc_do_defense_mode(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
@@ -1596,7 +1596,7 @@ proc_do_defense_mode(ctl_table *table, int write,
 }
 
 static int
-proc_do_sync_threshold(ctl_table *table, int write,
+proc_do_sync_threshold(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int *valp = table->data;
@@ -1616,7 +1616,7 @@ proc_do_sync_threshold(ctl_table *table, int write,
 }
 
 static int
-proc_do_sync_mode(ctl_table *table, int write,
+proc_do_sync_mode(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int *valp = table->data;
@@ -1634,7 +1634,7 @@ proc_do_sync_mode(ctl_table *table, int write,
 }
 
 static int
-proc_do_sync_ports(ctl_table *table, int write,
+proc_do_sync_ports(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int *valp = table->data;
@@ -1716,9 +1716,9 @@ static struct ctl_table vs_vars[] = {
        },
        {
                .procname       = "sync_qlen_max",
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_doulongvec_minmax,
        },
        {
                .procname       = "sync_sock_size",
@@ -2542,6 +2542,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
                struct ip_vs_dest *dest;
                struct ip_vs_dest_entry entry;
 
+               memset(&entry, 0, sizeof(entry));
                list_for_each_entry(dest, &svc->destinations, n_list) {
                        if (count >= get->num_dests)
                                break;
index 5ea26bd87743aea6591ff0d1005489b7de06f059..44595b8ae37f683d669fbdca7353cfa986984477 100644 (file)
@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
  *      IPVS LBLC sysctl table
  */
 #ifdef CONFIG_SYSCTL
-static ctl_table vs_vars_table[] = {
+static struct ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
                .data           = NULL,
index 50123c2ab4847e204b41ebca06a93bc04f228f69..876937db0bf46d5227b41b9084bddbca4e36e483 100644 (file)
@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
  *      IPVS LBLCR sysctl table
  */
 
-static ctl_table vs_vars_table[] = {
+static struct ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = NULL,
index 0df269d7c99f6d9513cae41e46a5879f691a423b..a65edfe4b16c91d6adb0ee9a54ee345d9bc5fa80 100644 (file)
@@ -67,8 +67,8 @@ struct ip_vs_sh_bucket {
 #define IP_VS_SH_TAB_MASK               (IP_VS_SH_TAB_SIZE - 1)
 
 struct ip_vs_sh_state {
-       struct ip_vs_sh_bucket          buckets[IP_VS_SH_TAB_SIZE];
        struct rcu_head                 rcu_head;
+       struct ip_vs_sh_bucket          buckets[IP_VS_SH_TAB_SIZE];
 };
 
 /*
index 6b217074237b85ba1b4420ba889f6cfc155246eb..b8a0924064ef79abae8999e05d4622df961209c0 100644 (file)
@@ -55,10 +55,14 @@ unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
                                struct nf_conntrack_expect *exp);
 EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);
 
-static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char);
-static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char);
+static int try_rfc959(const char *, size_t, struct nf_conntrack_man *,
+                     char, unsigned int *);
+static int try_rfc1123(const char *, size_t, struct nf_conntrack_man *,
+                      char, unsigned int *);
+static int try_eprt(const char *, size_t, struct nf_conntrack_man *,
+                   char, unsigned int *);
 static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *,
-                            char);
+                            char, unsigned int *);
 
 static struct ftp_search {
        const char *pattern;
@@ -66,7 +70,7 @@ static struct ftp_search {
        char skip;
        char term;
        enum nf_ct_ftp_type ftptype;
-       int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char);
+       int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char, unsigned int *);
 } search[IP_CT_DIR_MAX][2] = {
        [IP_CT_DIR_ORIGINAL] = {
                {
@@ -90,10 +94,8 @@ static struct ftp_search {
                {
                        .pattern        = "227 ",
                        .plen           = sizeof("227 ") - 1,
-                       .skip           = '(',
-                       .term           = ')',
                        .ftptype        = NF_CT_FTP_PASV,
-                       .getnum         = try_rfc959,
+                       .getnum         = try_rfc1123,
                },
                {
                        .pattern        = "229 ",
@@ -132,8 +134,9 @@ static int try_number(const char *data, size_t dlen, u_int32_t array[],
                        i++;
                else {
                        /* Unexpected character; true if it's the
-                          terminator and we're finished. */
-                       if (*data == term && i == array_size - 1)
+                          terminator (or we don't care about one)
+                          and we're finished. */
+                       if ((*data == term || !term) && i == array_size - 1)
                                return len;
 
                        pr_debug("Char %u (got %u nums) `%u' unexpected\n",
@@ -148,7 +151,8 @@ static int try_number(const char *data, size_t dlen, u_int32_t array[],
 
 /* Returns 0, or length of numbers: 192,168,1,1,5,6 */
 static int try_rfc959(const char *data, size_t dlen,
-                     struct nf_conntrack_man *cmd, char term)
+                     struct nf_conntrack_man *cmd, char term,
+                     unsigned int *offset)
 {
        int length;
        u_int32_t array[6];
@@ -163,6 +167,33 @@ static int try_rfc959(const char *data, size_t dlen,
        return length;
 }
 
+/*
+ * From RFC 1123:
+ * The format of the 227 reply to a PASV command is not
+ * well standardized.  In particular, an FTP client cannot
+ * assume that the parentheses shown on page 40 of RFC-959
+ * will be present (and in fact, Figure 3 on page 43 omits
+ * them).  Therefore, a User-FTP program that interprets
+ * the PASV reply must scan the reply for the first digit
+ * of the host and port numbers.
+ */
+static int try_rfc1123(const char *data, size_t dlen,
+                      struct nf_conntrack_man *cmd, char term,
+                      unsigned int *offset)
+{
+       int i;
+       for (i = 0; i < dlen; i++)
+               if (isdigit(data[i]))
+                       break;
+
+       if (i == dlen)
+               return 0;
+
+       *offset += i;
+
+       return try_rfc959(data + i, dlen - i, cmd, 0, offset);
+}
+
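try_rfc1123() above scans for the first digit because, per the RFC 1123 text quoted in the comment, the parentheses around the host/port tuple in a 227 reply cannot be assumed. A standalone userspace sketch of the same scan-then-parse approach (illustrative only, not the conntrack code):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Parse "227 Entering Passive Mode (h1,h2,h3,h4,p1,p2)" style replies,
 * tolerating missing parentheses by scanning for the first digit of the
 * host/port tuple. */
static int parse_pasv(const char *reply, unsigned int ip[4], unsigned int *port)
{
	unsigned int p1, p2;
	const char *p;

	p = strchr(reply, ' ');		/* skip the "227" reply code */
	if (!p)
		return -1;
	while (*p && !isdigit((unsigned char)*p))
		p++;
	if (sscanf(p, "%u,%u,%u,%u,%u,%u",
		   &ip[0], &ip[1], &ip[2], &ip[3], &p1, &p2) != 6)
		return -1;
	*port = (p1 << 8) | p2;
	return 0;
}

int main(void)
{
	unsigned int ip[4], port;

	/* works with or without the RFC 959 parentheses */
	if (!parse_pasv("227 Entering Passive Mode 192,168,1,2,19,137", ip, &port))
		printf("%u.%u.%u.%u:%u\n", ip[0], ip[1], ip[2], ip[3], port);
	return 0;
}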
 /* Grab port: number up to delimiter */
 static int get_port(const char *data, int start, size_t dlen, char delim,
                    __be16 *port)
@@ -191,7 +222,7 @@ static int get_port(const char *data, int start, size_t dlen, char delim,
 
 /* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
 static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
-                   char term)
+                   char term, unsigned int *offset)
 {
        char delim;
        int length;
@@ -239,7 +270,8 @@ static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
 
 /* Returns 0, or length of numbers: |||6446| */
 static int try_epsv_response(const char *data, size_t dlen,
-                            struct nf_conntrack_man *cmd, char term)
+                            struct nf_conntrack_man *cmd, char term,
+                            unsigned int *offset)
 {
        char delim;
 
@@ -261,9 +293,10 @@ static int find_pattern(const char *data, size_t dlen,
                        unsigned int *numlen,
                        struct nf_conntrack_man *cmd,
                        int (*getnum)(const char *, size_t,
-                                     struct nf_conntrack_man *, char))
+                                     struct nf_conntrack_man *, char,
+                                     unsigned int *))
 {
-       size_t i;
+       size_t i = plen;
 
        pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen);
        if (dlen == 0)
@@ -293,16 +326,18 @@ static int find_pattern(const char *data, size_t dlen,
        pr_debug("Pattern matches!\n");
        /* Now we've found the constant string, try to skip
           to the 'skip' character */
-       for (i = plen; data[i] != skip; i++)
-               if (i == dlen - 1) return -1;
+       if (skip) {
+               for (i = plen; data[i] != skip; i++)
+                       if (i == dlen - 1) return -1;
 
-       /* Skip over the last character */
-       i++;
+               /* Skip over the last character */
+               i++;
+       }
 
        pr_debug("Skipped up to `%c'!\n", skip);
 
        *numoff = i;
-       *numlen = getnum(data + i, dlen - i, cmd, term);
+       *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
        if (!*numlen)
                return -1;
 
index bd700b4013c1fd86adc3ea5a64f29c652330e31a..f641751dba9dc467b4207716bdb0ac69208747f8 100644 (file)
@@ -408,7 +408,7 @@ static int log_invalid_proto_max = 255;
 
 static struct ctl_table_header *nf_ct_netfilter_header;
 
-static ctl_table nf_ct_sysctl_table[] = {
+static struct ctl_table nf_ct_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_max",
                .data           = &nf_conntrack_max,
@@ -458,7 +458,7 @@ static ctl_table nf_ct_sysctl_table[] = {
 
 #define NET_NF_CONNTRACK_MAX 2089
 
-static ctl_table nf_ct_netfilter_table[] = {
+static struct ctl_table nf_ct_netfilter_table[] = {
        {
                .procname       = "nf_conntrack_max",
                .data           = &nf_conntrack_max,
index 388656d5a9ec45a6049af9e2915aaa7df1eefbbe..85296d4eac0e56c69052bbdab75269f3c7fd333e 100644 (file)
@@ -148,7 +148,7 @@ void nf_log_packet(struct net *net,
                va_start(args, fmt);
                vsnprintf(prefix, sizeof(prefix), fmt, args);
                va_end(args);
-               logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
+               logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
        }
        rcu_read_unlock();
 }
@@ -245,7 +245,7 @@ static const struct file_operations nflog_file_ops = {
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 
-static int nf_log_proc_dostring(ctl_table *table, int write,
+static int nf_log_proc_dostring(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        const struct nf_logger *logger;
@@ -368,17 +368,18 @@ static int __net_init nf_log_net_init(struct net *net)
        return 0;
 
 out_sysctl:
-       /* For init_net: errors will trigger panic, don't unroll on error. */
-       if (!net_eq(net, &init_net))
-               remove_proc_entry("nf_log", net->nf.proc_netfilter);
-
+#ifdef CONFIG_PROC_FS
+       remove_proc_entry("nf_log", net->nf.proc_netfilter);
+#endif
        return ret;
 }
 
 static void __net_exit nf_log_net_exit(struct net *net)
 {
        netfilter_log_sysctl_exit(net);
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("nf_log", net->nf.proc_netfilter);
+#endif
 }
 
 static struct pernet_operations nf_log_net_ops = {
index 5fea563afe30421ccfb5d1dcd294a055144623e0..85e20a9190816f348a845937b8a4fe470a917744 100644 (file)
@@ -104,7 +104,7 @@ static void mangle_contents(struct sk_buff *skb,
        /* move post-replacement */
        memmove(data + match_offset + rep_len,
                data + match_offset + match_len,
-               skb->tail - (skb->network_header + dataoff +
+               skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff +
                             match_offset + match_len));
 
        /* insert data from buffer */
index dc3fd5d44464a3ca7cdc5ed68d52ec71256b0cc2..c7b6d466a66247c3fa18b9a9a6f3e174a18d40da 100644 (file)
@@ -149,9 +149,12 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
-               if (last && cur != last)
-                       continue;
+               if (last) {
+                       if (cur != last)
+                               continue;
 
+                       last = NULL;
+               }
                if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq,
                                       NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
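The restructured loop here (and in ctnl_timeout_dump() below) clears 'last' once the saved cursor is found again, so an interrupted dump re-emits that entry and then continues, instead of skipping every later entry. A standalone sketch of the same resume-cursor pattern over a plain array (illustrative only):

#include <stdio.h>

/* Dump entries starting at the saved cursor; a NULL cursor means start
 * from the beginning.  Mirrors the 'if (last) { if (cur != last)
 * continue; last = NULL; }' shape used above. */
static void dump_from(const int *entries, int n, const int *last)
{
	for (int i = 0; i < n; i++) {
		const int *cur = &entries[i];

		if (last) {
			if (cur != last)
				continue;	/* still before the cursor */
			last = NULL;		/* found it; emit from here on */
		}
		printf("%d\n", *cur);
	}
}

int main(void)
{
	int entries[] = { 10, 20, 30, 40 };

	dump_from(entries, 4, &entries[1]);	/* prints 20, 30, 40 */
	return 0;
}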
index 701c88a20fea4c2a3ca6c9b4b2189521e13f96fe..65074dfb9383a40faef6c27346399e7dc6711889 100644 (file)
@@ -220,9 +220,12 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(cur, &cttimeout_list, head) {
-               if (last && cur != last)
-                       continue;
+               if (last) {
+                       if (cur != last)
+                               continue;
 
+                       last = NULL;
+               }
                if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq,
                                           NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
index faf1e9300d8adc358fa857ff4296892351060193..962e9792e3179997db98a448a76fc909432d841f 100644 (file)
@@ -602,7 +602,8 @@ static struct nf_loginfo default_loginfo = {
 
 /* log handler for internal netfilter logging api */
 void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+                 u_int8_t pf,
                  unsigned int hooknum,
                  const struct sk_buff *skb,
                  const struct net_device *in,
@@ -615,7 +616,6 @@ nfulnl_log_packet(u_int8_t pf,
        const struct nf_loginfo *li;
        unsigned int qthreshold;
        unsigned int plen;
-       struct net *net = dev_net(in ? in : out);
        struct nfnl_log_net *log = nfnl_log_pernet(net);
 
        if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
@@ -1045,7 +1045,9 @@ static int __net_init nfnl_log_net_init(struct net *net)
 
 static void __net_exit nfnl_log_net_exit(struct net *net)
 {
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
+#endif
 }
 
 static struct pernet_operations nfnl_log_net_ops = {
index 2e0e835baf7273427e84c971d4701f122e9c4a40..299a48ae5dc995c02c7598afed1d5959059ff0e3 100644 (file)
 
 #define NFQNL_QMAX_DEFAULT 1024
 
+/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
+ * includes the header length. Thus, the maximum packet length that we
+ * support is 65531 bytes. We send truncated packets if the specified length
+ * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
+ * attribute to detect truncation.
+ */
+#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
+
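The 65531-byte figure in the comment above follows from nla_len being a 16-bit field that also counts the attribute header. A quick standalone check of that arithmetic, re-declaring the attribute header locally rather than pulling in kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's struct nlattr / NLA_HDRLEN. */
struct nlattr {
	uint16_t nla_len;
	uint16_t nla_type;
};
#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int)NLA_ALIGN(sizeof(struct nlattr)))

int main(void)
{
	/* nla_len is 16 bits and counts the header too, so the payload is
	 * capped at 0xffff - NLA_HDRLEN = 65531 bytes. */
	printf("NLA_HDRLEN = %d, max copy range = %d\n",
	       NLA_HDRLEN, 0xffff - NLA_HDRLEN);
	return 0;
}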
 struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;
@@ -122,7 +130,7 @@ instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-       inst->copy_range = 0xffff;
+       inst->copy_range = NFQNL_MAX_COPY_RANGE;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);
@@ -333,10 +341,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                        return NULL;
 
                data_len = ACCESS_ONCE(queue->copy_range);
-               if (data_len == 0 || data_len > entskb->len)
+               if (data_len > entskb->len)
                        data_len = entskb->len;
 
-
                if (!entskb->head_frag ||
                    skb_headlen(entskb) < L1_CACHE_BYTES ||
                    skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS)
@@ -465,7 +472,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;
 
-       if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
+       if (cap_len > data_len &&
+           nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
                goto nla_put_failure;
 
        if (nfqnl_put_packet_info(skb, entskb))
@@ -509,10 +517,6 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
        }
        spin_lock_bh(&queue->lock);
 
-       if (!queue->peer_portid) {
-               err = -EINVAL;
-               goto err_out_free_nskb;
-       }
        if (queue->queue_total >= queue->queue_maxlen) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
@@ -637,9 +641,6 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        if (queue->copy_mode == NFQNL_COPY_NONE)
                return -EINVAL;
 
-       if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb))
-               return __nfqnl_enqueue_packet(net, queue, entry);
-
        skb = entry->skb;
 
        switch (entry->pf) {
@@ -651,6 +652,9 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
                break;
        }
 
+       if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
+               return __nfqnl_enqueue_packet(net, queue, entry);
+
        nf_bridge_adjust_skb_data(skb);
        segs = skb_gso_segment(skb, 0);
        /* Does not use PTR_ERR to limit the number of error codes that can be
@@ -731,13 +735,8 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
 
        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
-               /* We're using struct nlattr which has 16bit nla_len. Note that
-                * nla_len includes the header length. Thus, the maximum packet
-                * length that we support is 65531 bytes. We send truncated
-                * packets if the specified length is larger than that.
-                */
-               if (range > 0xffff - NLA_HDRLEN)
-                       queue->copy_range = 0xffff - NLA_HDRLEN;
+               if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
+                       queue->copy_range = NFQNL_MAX_COPY_RANGE;
                else
                        queue->copy_range = range;
                break;
@@ -800,7 +799,7 @@ static int
 nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
@@ -1285,7 +1284,9 @@ static int __net_init nfnl_queue_net_init(struct net *net)
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
+#endif
 }
 
 static struct pernet_operations nfnl_queue_net_ops = {
index a60261cb0e80b6048589bd89e894e9798825f1f8..da35ac06a975ded85b76953e4d7eb54c33478b04 100644 (file)
@@ -26,6 +26,9 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
        if (skb->nfct != NULL)
                return XT_CONTINUE;
 
+       /* special case the untracked ct : we want the percpu object */
+       if (!ct)
+               ct = nf_ct_untracked_get();
        atomic_inc(&ct->ct_general.use);
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = IP_CT_NEW;
@@ -186,8 +189,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
        int ret = -EOPNOTSUPP;
 
        if (info->flags & XT_CT_NOTRACK) {
-               ct = nf_ct_untracked_get();
-               atomic_inc(&ct->ct_general.use);
+               ct = NULL;
                goto out;
        }
 
@@ -311,7 +313,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
        struct nf_conn *ct = info->ct;
        struct nf_conn_help *help;
 
-       if (!nf_ct_is_untracked(ct)) {
+       if (ct && !nf_ct_is_untracked(ct)) {
                help = nfct_help(ct);
                if (help)
                        module_put(help->helper->me);
@@ -319,8 +321,8 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
                nf_ct_l3proto_module_put(par->family);
 
                xt_ct_destroy_timeout(ct);
+               nf_ct_put(info->ct);
        }
-       nf_ct_put(info->ct);
 }
 
 static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
index fe573f6c9e9161e4201e6cbc8698c3b58b0583b2..5ab24843370a0b452e8647b4b0a99d54c96d6087 100644 (file)
@@ -466,7 +466,8 @@ log_packet_common(struct sbuff *m,
 
 
 static void
-ipt_log_packet(u_int8_t pf,
+ipt_log_packet(struct net *net,
+              u_int8_t pf,
               unsigned int hooknum,
               const struct sk_buff *skb,
               const struct net_device *in,
@@ -475,7 +476,6 @@ ipt_log_packet(u_int8_t pf,
               const char *prefix)
 {
        struct sbuff *m;
-       struct net *net = dev_net(in ? in : out);
 
        /* FIXME: Disabled from containers until syslog ns is supported */
        if (!net_eq(net, &init_net))
@@ -737,7 +737,7 @@ static void dump_ipv6_packet(struct sbuff *m,
                dump_sk_uid_gid(m, skb->sk);
 
        /* Max length: 16 "MARK=0xFFFFFFFF " */
-       if (!recurse && skb->mark)
+       if (recurse && skb->mark)
                sb_add(m, "MARK=0x%x ", skb->mark);
 }
 
@@ -797,7 +797,8 @@ fallback:
 }
 
 static void
-ip6t_log_packet(u_int8_t pf,
+ip6t_log_packet(struct net *net,
+               u_int8_t pf,
                unsigned int hooknum,
                const struct sk_buff *skb,
                const struct net_device *in,
@@ -806,7 +807,6 @@ ip6t_log_packet(u_int8_t pf,
                const char *prefix)
 {
        struct sbuff *m;
-       struct net *net = dev_net(in ? in : out);
 
        /* FIXME: Disabled from containers until syslog ns is supported */
        if (!net_eq(net, &init_net))
@@ -833,17 +833,18 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_log_info *loginfo = par->targinfo;
        struct nf_loginfo li;
+       struct net *net = dev_net(par->in ? par->in : par->out);
 
        li.type = NF_LOG_TYPE_LOG;
        li.u.log.level = loginfo->level;
        li.u.log.logflags = loginfo->logflags;
 
        if (par->family == NFPROTO_IPV4)
-               ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
+               ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
                               par->out, &li, loginfo->prefix);
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        else if (par->family == NFPROTO_IPV6)
-               ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
+               ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
                                par->out, &li, loginfo->prefix);
 #endif
        else
index a17dd0f589b22d3ffce573177414c4afa0790c1d..fb7497c928a0158675e377fe7b1f3ba4c043a8a9 100644 (file)
@@ -26,13 +26,14 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_nflog_info *info = par->targinfo;
        struct nf_loginfo li;
+       struct net *net = dev_net(par->in ? par->in : par->out);
 
        li.type              = NF_LOG_TYPE_ULOG;
        li.u.ulog.copy_len   = info->len;
        li.u.ulog.group      = info->group;
        li.u.ulog.qthreshold = info->threshold;
 
-       nfulnl_log_packet(par->family, par->hooknum, skb, par->in,
+       nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
                          par->out, &li, info->prefix);
        return XT_CONTINUE;
 }
index a75240f0d42b8da245035a8f8ee59b51895c445e..7011c71646f0266eb75c856bc49fea7b5030bd52 100644 (file)
@@ -45,17 +45,22 @@ optlen(const u_int8_t *opt, unsigned int offset)
 
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
-                    const struct xt_tcpmss_info *info,
+                    const struct xt_action_param *par,
                     unsigned int in_mtu,
                     unsigned int tcphoff,
                     unsigned int minlen)
 {
+       const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
        unsigned int tcplen, i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;
 
+       /* This is a fragment, no TCP header is available */
+       if (par->fragoff != 0)
+               return XT_CONTINUE;
+
        if (!skb_make_writable(skb, skb->len))
                return -1;
 
@@ -125,6 +130,18 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
        skb_put(skb, TCPOLEN_MSS);
 
+       /*
+        * IPv4: RFC 1122 states "If an MSS option is not received at
+        * connection setup, TCP MUST assume a default send MSS of 536".
+        * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280; less 60 bytes
+        * of IPv6 and TCP headers, the default MSS value is 1220
+        * Since no MSS was provided, we must use the default values
+        */
+       if (par->family == NFPROTO_IPV4)
+               newmss = min(newmss, (u16)536);
+       else
+               newmss = min(newmss, (u16)1220);
+
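The clamps just added follow from the RFCs cited in the comment: RFC 1122's 536 is the 576-byte minimum IPv4 datagram less 40 bytes of IPv4 and TCP headers, and 1220 is IPv6's 1280-byte minimum MTU less 60 bytes of IPv6 and TCP headers. A trivial standalone check:

#include <stdio.h>

int main(void)
{
	/* RFC 1122: 576-byte minimum datagram - 20 (IPv4) - 20 (TCP) */
	printf("IPv4 default MSS: %d\n", 576 - 20 - 20);
	/* RFC 2460: 1280-byte minimum MTU - 40 (IPv6) - 20 (TCP) */
	printf("IPv6 default MSS: %d\n", 1280 - 40 - 20);
	return 0;
}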
        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
        memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
 
@@ -182,7 +199,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        __be16 newlen;
        int ret;
 
-       ret = tcpmss_mangle_packet(skb, par->targinfo,
+       ret = tcpmss_mangle_packet(skb, par,
                                   tcpmss_reverse_mtu(skb, PF_INET),
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
@@ -211,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
-       ret = tcpmss_mangle_packet(skb, par->targinfo,
+       ret = tcpmss_mangle_packet(skb, par,
                                   tcpmss_reverse_mtu(skb, PF_INET6),
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
index 25fd1c4e1eec3229e8629420e6750ebe74bb6bea..b68fa191710fe02bdb1b09f825ee6330c9bf570b 100644 (file)
@@ -30,18 +30,31 @@ static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset)
 
 static unsigned int
 tcpoptstrip_mangle_packet(struct sk_buff *skb,
-                         const struct xt_tcpoptstrip_target_info *info,
+                         const struct xt_action_param *par,
                          unsigned int tcphoff, unsigned int minlen)
 {
+       const struct xt_tcpoptstrip_target_info *info = par->targinfo;
        unsigned int optl, i, j;
        struct tcphdr *tcph;
        u_int16_t n, o;
        u_int8_t *opt;
+       int len;
+
+       /* This is a fragment, no TCP header is available */
+       if (par->fragoff != 0)
+               return XT_CONTINUE;
 
        if (!skb_make_writable(skb, skb->len))
                return NF_DROP;
 
+       len = skb->len - tcphoff;
+       if (len < (int)sizeof(struct tcphdr))
+               return NF_DROP;
+
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+       if (tcph->doff * 4 > len)
+               return NF_DROP;
+
        opt  = (u_int8_t *)tcph;
 
        /*
@@ -76,7 +89,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 static unsigned int
 tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
+       return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb),
               sizeof(struct iphdr) + sizeof(struct tcphdr));
 }
 
@@ -94,7 +107,7 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        if (tcphoff < 0)
                return NF_DROP;
 
-       return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff,
+       return tcpoptstrip_mangle_packet(skb, par, tcphoff,
               sizeof(*ipv6h) + sizeof(struct tcphdr));
 }
 #endif
index bd93e51d30acc4eded7e5e4da0898e0838dd7c43..292934d234822a4a0b0088e030b316de05eee9bc 100644 (file)
@@ -200,7 +200,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct xt_tee_priv *priv;
 
        priv = container_of(this, struct xt_tee_priv, notifier);
index 49c5ff7f6dd67fdbec03b360323aa102ff59e37a..68ff29f608679f598f718e3018bdd8e516963c2a 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/ip6_fib.h>
 #endif
 
+#include <linux/netfilter_ipv6.h>
 #include <linux/netfilter/xt_addrtype.h>
 #include <linux/netfilter/x_tables.h>
 
@@ -33,12 +34,12 @@ MODULE_ALIAS("ip6t_addrtype");
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
-                           const struct in6_addr *addr)
+                           const struct in6_addr *addr, u16 mask)
 {
        const struct nf_afinfo *afinfo;
        struct flowi6 flow;
        struct rt6_info *rt;
-       u32 ret;
+       u32 ret = 0;
        int route_err;
 
        memset(&flow, 0, sizeof(flow));
@@ -49,12 +50,19 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
        rcu_read_lock();
 
        afinfo = nf_get_afinfo(NFPROTO_IPV6);
-       if (afinfo != NULL)
+       if (afinfo != NULL) {
+               const struct nf_ipv6_ops *v6ops;
+
+               if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
+                       v6ops = nf_get_ipv6_ops();
+                       if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+                               ret = XT_ADDRTYPE_LOCAL;
+               }
                route_err = afinfo->route(net, (struct dst_entry **)&rt,
-                                       flowi6_to_flowi(&flow), !!dev);
-       else
+                                         flowi6_to_flowi(&flow), false);
+       } else {
                route_err = 1;
-
+       }
        rcu_read_unlock();
 
        if (route_err)
@@ -62,15 +70,12 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 
        if (rt->rt6i_flags & RTF_REJECT)
                ret = XT_ADDRTYPE_UNREACHABLE;
-       else
-               ret = 0;
 
-       if (rt->rt6i_flags & RTF_LOCAL)
+       if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
                ret |= XT_ADDRTYPE_LOCAL;
        if (rt->rt6i_flags & RTF_ANYCAST)
                ret |= XT_ADDRTYPE_ANYCAST;
 
-
        dst_release(&rt->dst);
        return ret;
 }
@@ -90,7 +95,7 @@ static bool match_type6(struct net *net, const struct net_device *dev,
 
        if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
             XT_ADDRTYPE_UNREACHABLE) & mask)
-               return !!(mask & match_lookup_rt6(net, dev, addr));
+               return !!(mask & match_lookup_rt6(net, dev, addr, mask));
        return true;
 }
 
index ed0db15ab00e3c4f07f5931e27bc1f657988934c..7720b036d76a84c949080c8c32827b604c80da64 100644 (file)
@@ -18,7 +18,7 @@ static bool
 xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_rateest_match_info *info = par->matchinfo;
-       struct gnet_stats_rate_est *r;
+       struct gnet_stats_rate_est64 *r;
        u_int32_t bps1, bps2, pps1, pps2;
        bool ret = true;
 
index 63b2bdb59e955fd012de3787f9e52279169fe4bc..02704245710e8dbc500d7e5d5c428c6d8bbfd748 100644 (file)
@@ -107,7 +107,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
-       struct sock *sk;
+       struct sock *sk = skb->sk;
        __be32 uninitialized_var(daddr), uninitialized_var(saddr);
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        u8 uninitialized_var(protocol);
@@ -155,9 +155,11 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        }
 #endif
 
-       sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
-                                  saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
-       if (sk != NULL) {
+       if (!sk)
+               sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
+                                          saddr, daddr, sport, dport,
+                                          par->in, NFT_LOOKUP_ANY);
+       if (sk) {
                bool wildcard;
                bool transparent = true;
 
@@ -173,7 +175,8 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                                       (sk->sk_state == TCP_TIME_WAIT &&
                                        inet_twsk(sk)->tw_transparent));
 
-               xt_socket_put_sk(sk);
+               if (sk != skb->sk)
+                       xt_socket_put_sk(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
@@ -260,7 +263,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 {
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
-       struct sock *sk;
+       struct sock *sk = skb->sk;
        struct in6_addr *daddr = NULL, *saddr = NULL;
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        int thoff = 0, uninitialized_var(tproto);
@@ -291,9 +294,11 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                return false;
        }
 
-       sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
-                                  saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
-       if (sk != NULL) {
+       if (!sk)
+               sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+                                          saddr, daddr, sport, dport,
+                                          par->in, NFT_LOOKUP_ANY);
+       if (sk) {
                bool wildcard;
                bool transparent = true;
 
@@ -309,7 +314,8 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                                       (sk->sk_state == TCP_TIME_WAIT &&
                                        inet_twsk(sk)->tw_transparent));
 
-               xt_socket_put_sk(sk);
+               if (sk != skb->sk)
+                       xt_socket_put_sk(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
index d8d42433755051023320299f919f69db9e58ec29..6bb1d42f0fac04c02a69ee1790d40baf7af53824 100644 (file)
@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
        }
 }
 
+/**
+ * netlbl_domhsh_validate - Validate a new domain mapping entry
+ * @entry: the entry to validate
+ *
+ * This function validates the new domain mapping entry to ensure that it is
+ * a valid entry.  Returns zero on success, negative values on failure.
+ *
+ */
+static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
+{
+       struct netlbl_af4list *iter4;
+       struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct netlbl_af6list *iter6;
+       struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
+
+       if (entry == NULL)
+               return -EINVAL;
+
+       switch (entry->type) {
+       case NETLBL_NLTYPE_UNLABELED:
+               if (entry->type_def.cipsov4 != NULL ||
+                   entry->type_def.addrsel != NULL)
+                       return -EINVAL;
+               break;
+       case NETLBL_NLTYPE_CIPSOV4:
+               if (entry->type_def.cipsov4 == NULL)
+                       return -EINVAL;
+               break;
+       case NETLBL_NLTYPE_ADDRSELECT:
+               netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+                       map4 = netlbl_domhsh_addr4_entry(iter4);
+                       switch (map4->type) {
+                       case NETLBL_NLTYPE_UNLABELED:
+                               if (map4->type_def.cipsov4 != NULL)
+                                       return -EINVAL;
+                               break;
+                       case NETLBL_NLTYPE_CIPSOV4:
+                               if (map4->type_def.cipsov4 == NULL)
+                                       return -EINVAL;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+               netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+                       map6 = netlbl_domhsh_addr6_entry(iter6);
+                       switch (map6->type) {
+                       case NETLBL_NLTYPE_UNLABELED:
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+#endif /* IPv6 */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /*
  * Domain Hash Table Functions
  */
@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
 
+       ret_val = netlbl_domhsh_validate(entry);
+       if (ret_val != 0)
+               return ret_val;
+
        /* XXX - we can remove this RCU read lock as the spinlock protects the
         *       entire function, but before we do we need to fixup the
         *       netlbl_af[4,6]list RCU functions to do "the right thing" with
index 8a6c6ea466d874dd34afd2013bef4f777a47db42..af3531926ee0c0339c6cc73725c50a1d7ec2a18d 100644 (file)
@@ -708,7 +708,7 @@ unlhsh_remove_return:
  * netlbl_unlhsh_netdev_handler - Network device notification handler
  * @this: notifier block
  * @event: the event
- * @ptr: the network device (cast to void)
+ * @ptr: the netdevice notifier info (cast to void)
  *
  * Description:
  * Handle network device events, although at present all we care about is a
@@ -717,10 +717,9 @@ unlhsh_remove_return:
  *
  */
 static int netlbl_unlhsh_netdev_handler(struct notifier_block *this,
-                                       unsigned long event,
-                                       void *ptr)
+                                       unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netlbl_unlhsh_iface *iface = NULL;
 
        if (!net_eq(dev_net(dev), &init_net))
index 12ac6b47a35c45fe0abb5fce87ef07dfdf1ba1f7..6967fbcca6c564e23616c33ca52c06d95869f591 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/audit.h>
 #include <linux/mutex.h>
 #include <linux/vmalloc.h>
+#include <linux/if_arp.h>
 #include <asm/cacheflush.h>
 
 #include <net/net_namespace.h>
@@ -101,6 +102,9 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
+static DEFINE_SPINLOCK(netlink_tap_lock);
+static struct list_head netlink_tap_all __read_mostly;
+
 static inline u32 netlink_group_mask(u32 group)
 {
        return group ? 1 << (group - 1) : 0;
@@ -111,6 +115,100 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u
        return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 }
 
+int netlink_add_tap(struct netlink_tap *nt)
+{
+       if (unlikely(nt->dev->type != ARPHRD_NETLINK))
+               return -EINVAL;
+
+       spin_lock(&netlink_tap_lock);
+       list_add_rcu(&nt->list, &netlink_tap_all);
+       spin_unlock(&netlink_tap_lock);
+
+       if (nt->module)
+               __module_get(nt->module);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(netlink_add_tap);
+
+int __netlink_remove_tap(struct netlink_tap *nt)
+{
+       bool found = false;
+       struct netlink_tap *tmp;
+
+       spin_lock(&netlink_tap_lock);
+
+       list_for_each_entry(tmp, &netlink_tap_all, list) {
+               if (nt == tmp) {
+                       list_del_rcu(&nt->list);
+                       found = true;
+                       goto out;
+               }
+       }
+
+       pr_warn("__netlink_remove_tap: %p not found\n", nt);
+out:
+       spin_unlock(&netlink_tap_lock);
+
+       if (found && nt->module)
+               module_put(nt->module);
+
+       return found ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(__netlink_remove_tap);
+
+int netlink_remove_tap(struct netlink_tap *nt)
+{
+       int ret;
+
+       ret = __netlink_remove_tap(nt);
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(netlink_remove_tap);
+
+static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+                                    struct net_device *dev)
+{
+       struct sk_buff *nskb;
+       int ret = -ENOMEM;
+
+       dev_hold(dev);
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (nskb) {
+               nskb->dev = dev;
+               ret = dev_queue_xmit(nskb);
+               if (unlikely(ret > 0))
+                       ret = net_xmit_errno(ret);
+       }
+
+       dev_put(dev);
+       return ret;
+}
+
+static void __netlink_deliver_tap(struct sk_buff *skb)
+{
+       int ret;
+       struct netlink_tap *tmp;
+
+       list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
+               ret = __netlink_deliver_tap_skb(skb, tmp->dev);
+               if (unlikely(ret))
+                       break;
+       }
+}
+
+static void netlink_deliver_tap(struct sk_buff *skb)
+{
+       rcu_read_lock();
+
+       if (unlikely(!list_empty(&netlink_tap_all)))
+               __netlink_deliver_tap(skb);
+
+       rcu_read_unlock();
+}
+
 static void netlink_overrun(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -371,7 +469,7 @@ static int netlink_mmap(struct file *file, struct socket *sock,
        err = 0;
 out:
        mutex_unlock(&nlk->pg_vec_lock);
-       return 0;
+       return err;
 }
 
 static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
@@ -747,9 +845,13 @@ static void netlink_skb_destructor(struct sk_buff *skb)
                atomic_dec(&ring->pending);
                sock_put(sk);
 
-               skb->data = NULL;
+               skb->head = NULL;
        }
 #endif
+       if (is_vmalloc_addr(skb->head)) {
+               vfree(skb->head);
+               skb->head = NULL;
+       }
        if (skb->sk != NULL)
                sock_rfree(skb);
 }
@@ -854,16 +956,23 @@ netlink_unlock_table(void)
                wake_up(&nl_table_wait);
 }
 
+static bool netlink_compare(struct net *net, struct sock *sk)
+{
+       return net_eq(sock_net(sk), net);
+}
+
 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
-       struct nl_portid_hash *hash = &nl_table[protocol].hash;
+       struct netlink_table *table = &nl_table[protocol];
+       struct nl_portid_hash *hash = &table->hash;
        struct hlist_head *head;
        struct sock *sk;
 
        read_lock(&nl_table_lock);
        head = nl_portid_hashfn(hash, portid);
        sk_for_each(sk, head) {
-               if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
+               if (table->compare(net, sk) &&
+                   (nlk_sk(sk)->portid == portid)) {
                        sock_hold(sk);
                        goto found;
                }
@@ -976,7 +1085,8 @@ netlink_update_listeners(struct sock *sk)
 
 static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 {
-       struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
+       struct netlink_table *table = &nl_table[sk->sk_protocol];
+       struct nl_portid_hash *hash = &table->hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
@@ -986,7 +1096,8 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
        head = nl_portid_hashfn(hash, portid);
        len = 0;
        sk_for_each(osk, head) {
-               if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
+               if (table->compare(net, osk) &&
+                   (nlk_sk(osk)->portid == portid))
                        break;
                len++;
        }
@@ -1183,7 +1294,8 @@ static int netlink_autobind(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
-       struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
+       struct netlink_table *table = &nl_table[sk->sk_protocol];
+       struct nl_portid_hash *hash = &table->hash;
        struct hlist_head *head;
        struct sock *osk;
        s32 portid = task_tgid_vnr(current);
@@ -1195,7 +1307,7 @@ retry:
        netlink_table_grab();
        head = nl_portid_hashfn(hash, portid);
        sk_for_each(osk, head) {
-               if (!net_eq(sock_net(osk), net))
+               if (!table->compare(net, osk))
                        continue;
                if (nlk_sk(osk)->portid == portid) {
                        /* Bind collision, search negative portid values. */
@@ -1420,6 +1532,35 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
        return sock;
 }
 
+static struct sk_buff *netlink_alloc_large_skb(unsigned int size)
+{
+       struct sk_buff *skb;
+       void *data;
+
+       if (size <= NLMSG_GOODSIZE)
+               return alloc_skb(size, GFP_KERNEL);
+
+       skb = alloc_skb_head(GFP_KERNEL);
+       if (skb == NULL)
+               return NULL;
+
+       data = vmalloc(size);
+       if (data == NULL)
+               goto err;
+
+       skb->head       = data;
+       skb->data       = data;
+       skb_reset_tail_pointer(skb);
+       skb->end        = skb->tail + size;
+       skb->len        = 0;
+       skb->destructor = netlink_skb_destructor;
+
+       return skb;
+err:
+       kfree_skb(skb);
+       return NULL;
+}
+
 /*
  * Attach a skb to a netlink socket.
  * The caller must hold a reference to the destination socket. On error, the
@@ -1475,6 +1616,8 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
        int len = skb->len;
 
+       netlink_deliver_tap(skb);
+
 #ifdef CONFIG_NETLINK_MMAP
        if (netlink_skb_is_mmaped(skb))
                netlink_queue_mmaped_skb(sk, skb);
@@ -1510,7 +1653,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
                return skb;
 
        delta = skb->end - skb->tail;
-       if (delta * 2 < skb->truesize)
+       if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
                return skb;
 
        if (skb_shared(skb)) {
@@ -1535,6 +1678,11 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 
        ret = -ECONNREFUSED;
        if (nlk->netlink_rcv != NULL) {
+               /* We could do a netlink_deliver_tap(skb) here as well
+                * but since this is intended for the kernel only, we
+                * should rather let it stay under the hood.
+                */
+
                ret = skb->len;
                netlink_skb_set_owner_r(skb, sk);
                NETLINK_CB(skb).sk = ssk;
@@ -2096,7 +2244,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (len > sk->sk_sndbuf - 32)
                goto out;
        err = -ENOBUFS;
-       skb = alloc_skb(len, GFP_KERNEL);
+       skb = netlink_alloc_large_skb(len);
        if (skb == NULL)
                goto out;
 
@@ -2285,6 +2433,8 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
                if (cfg) {
                        nl_table[unit].bind = cfg->bind;
                        nl_table[unit].flags = cfg->flags;
+                       if (cfg->compare)
+                               nl_table[unit].compare = cfg->compare;
                }
                nl_table[unit].registered = 1;
        } else {
@@ -2707,6 +2857,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct sock *s;
        struct nl_seq_iter *iter;
+       struct net *net;
        int i, j;
 
        ++*pos;
@@ -2714,11 +2865,12 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        if (v == SEQ_START_TOKEN)
                return netlink_seq_socket_idx(seq, 0);
 
+       net = seq_file_net(seq);
        iter = seq->private;
        s = v;
        do {
                s = sk_next(s);
-       } while (s && sock_net(s) != seq_file_net(seq));
+       } while (s && !nl_table[s->sk_protocol].compare(net, s));
        if (s)
                return s;
 
@@ -2730,7 +2882,8 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
-                       while (s && sock_net(s) != seq_file_net(seq))
+
+                       while (s && !nl_table[s->sk_protocol].compare(net, s))
                                s = sk_next(s);
                        if (s) {
                                iter->link = i;
@@ -2923,8 +3076,12 @@ static int __init netlink_proto_init(void)
                hash->shift = 0;
                hash->mask = 0;
                hash->rehash_time = jiffies;
+
+               nl_table[i].compare = netlink_compare;
        }
 
+       INIT_LIST_HEAD(&netlink_tap_all);
+
        netlink_add_usersock_entry();
 
        sock_register(&netlink_family_ops);
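
Aside, not part of the patch: the netlink_add_tap()/netlink_remove_tap() pair added above expects a caller that owns a netdevice of type ARPHRD_NETLINK (the nlmon monitoring driver is one such user). A hedged registration sketch with hypothetical names:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static struct netlink_tap example_tap;

static int example_tap_attach(struct net_device *dev)
{
        /* netlink_add_tap() returns -EINVAL for anything but ARPHRD_NETLINK */
        example_tap.dev = dev;
        example_tap.module = THIS_MODULE;
        return netlink_add_tap(&example_tap);
}

static void example_tap_detach(void)
{
        /* the synchronize_net() inside waits out RCU walkers of the tap list */
        netlink_remove_tap(&example_tap);
}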
index ed8522265f4eb08ecedf9e7fd6f37386beb6dddd..eaa88d187cdcebc65152296213c3a82aaa6ab5ad 100644 (file)
@@ -73,6 +73,7 @@ struct netlink_table {
        struct mutex            *cb_mutex;
        struct module           *module;
        void                    (*bind)(int group);
+       bool                    (*compare)(struct net *net, struct sock *sock);
        int                     registered;
 };
 
index ec0c80fde69f8e23f205d8cb17616c1486e9ce01..698814bfa7adfd4a58e9ec13f24eeb341ea86e08 100644 (file)
@@ -117,7 +117,7 @@ static void nr_kill_by_device(struct net_device *dev)
  */
 static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
index 42f630b9a6981d4f5616db8792a76831558dede5..ba1c368b3f186e140149a75e8d98dee24587a020 100644 (file)
@@ -34,7 +34,7 @@ static int min_reset[]   = {0}, max_reset[]   = {1};
 
 static struct ctl_table_header *nr_table_header;
 
-static ctl_table nr_table[] = {
+static struct ctl_table nr_table[] = {
        {
                .procname       = "default_path_quality",
                .data           = &sysctl_netrom_default_path_quality,
index d9ea33c361be5c795c8eabecce287cb5195f45a2..9fbc04a31ed6d10e8c8a5e12e3401bcc0fd25f16 100644 (file)
@@ -19,6 +19,8 @@ config OPENVSWITCH
          which is able to accept configuration from a variety of sources and
          translate it into packet processing rules.
 
+         Open vSwitch GRE support depends on CONFIG_NET_IPGRE_DEMUX.
+
          See http://openvswitch.org for more information and userspace
          utilities.
 
index 15e7384745c1f712e9603acf84e67ff20471c771..01bddb2991e3578e74ee7894bc6eccb175905002 100644 (file)
@@ -10,5 +10,6 @@ openvswitch-y := \
        dp_notify.o \
        flow.o \
        vport.o \
+       vport-gre.o \
        vport-internal_dev.o \
-       vport-netdev.o \
+       vport-netdev.o
index 894b6cbdd9295841e6782268b8743fcb63358391..22c5f399f1cf8119f13064559099ad7c30aae93d 100644 (file)
@@ -130,9 +130,13 @@ static int set_eth_addr(struct sk_buff *skb,
        if (unlikely(err))
                return err;
 
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
        memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
        memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
 
+       ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
        return 0;
 }
 
@@ -432,6 +436,10 @@ static int execute_set_action(struct sk_buff *skb,
                skb->mark = nla_get_u32(nested_attr);
                break;
 
+       case OVS_KEY_ATTR_IPV4_TUNNEL:
+               OVS_CB(skb)->tun_key = nla_data(nested_attr);
+               break;
+
        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, nla_data(nested_attr));
                break;
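
Aside, not part of the patch: the skb_postpull_rcsum()/ovs_skb_postpush_rcsum() pair wrapped around the MAC rewrite above keeps skb->csum consistent when the NIC delivered CHECKSUM_COMPLETE. The same idiom as a standalone, illustrative helper (not a kernel API):

#include <linux/skbuff.h>
#include <net/checksum.h>

static void rewrite_csum_covered(struct sk_buff *skb, void *at,
                                 const void *repl, unsigned int len)
{
        /* subtract the bytes about to change; no-op unless CHECKSUM_COMPLETE */
        skb_postpull_rcsum(skb, at, len);

        memcpy(at, repl, len);

        /* fold the replacement bytes back in, as ovs_skb_postpush_rcsum() does */
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(at, len, 0));
}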
index d12d6b8b5e8b1e4cd7d0f6504882fb47a866c52c..f7e3a0d84c40488cd6744dce31c90bd0d942bd3b 100644 (file)
@@ -362,6 +362,14 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
 static size_t key_attr_size(void)
 {
        return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
+               + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
+                 + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
+                 + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
+                 + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
+                 + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
+                 + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
+                 + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
+                 + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
                + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
@@ -464,16 +472,89 @@ static int flush_flows(struct datapath *dp)
        return 0;
 }
 
-static int validate_actions(const struct nlattr *attr,
-                               const struct sw_flow_key *key, int depth);
+static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
+{
+
+       struct sw_flow_actions *acts;
+       int new_acts_size;
+       int req_size = NLA_ALIGN(attr_len);
+       int next_offset = offsetof(struct sw_flow_actions, actions) +
+                                       (*sfa)->actions_len;
+
+       if (req_size <= (ksize(*sfa) - next_offset))
+               goto out;
+
+       new_acts_size = ksize(*sfa) * 2;
+
+       if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+                       return ERR_PTR(-EMSGSIZE);
+               new_acts_size = MAX_ACTIONS_BUFSIZE;
+       }
+
+       acts = ovs_flow_actions_alloc(new_acts_size);
+       if (IS_ERR(acts))
+               return (void *)acts;
+
+       memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
+       acts->actions_len = (*sfa)->actions_len;
+       kfree(*sfa);
+       *sfa = acts;
+
+out:
+       (*sfa)->actions_len += req_size;
+       return  (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
+}
+
+static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
+{
+       struct nlattr *a;
+
+       a = reserve_sfa_size(sfa, nla_attr_size(len));
+       if (IS_ERR(a))
+               return PTR_ERR(a);
+
+       a->nla_type = attrtype;
+       a->nla_len = nla_attr_size(len);
+
+       if (data)
+               memcpy(nla_data(a), data, len);
+       memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
+
+       return 0;
+}
+
+static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
+{
+       int used = (*sfa)->actions_len;
+       int err;
+
+       err = add_action(sfa, attrtype, NULL, 0);
+       if (err)
+               return err;
+
+       return used;
+}
 
-static int validate_sample(const struct nlattr *attr,
-                               const struct sw_flow_key *key, int depth)
+static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
+{
+       struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
+
+       a->nla_len = sfa->actions_len - st_offset;
+}
+
+static int validate_and_copy_actions(const struct nlattr *attr,
+                                    const struct sw_flow_key *key, int depth,
+                                    struct sw_flow_actions **sfa);
+
+static int validate_and_copy_sample(const struct nlattr *attr,
+                                   const struct sw_flow_key *key, int depth,
+                                   struct sw_flow_actions **sfa)
 {
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
-       int rem;
+       int rem, start, err, st_acts;
 
        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
@@ -492,7 +573,26 @@ static int validate_sample(const struct nlattr *attr,
        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;
-       return validate_actions(actions, key, depth + 1);
+
+       /* validation done, copy sample action. */
+       start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
+       if (start < 0)
+               return start;
+       err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
+       if (err)
+               return err;
+       st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
+       if (st_acts < 0)
+               return st_acts;
+
+       err = validate_and_copy_actions(actions, key, depth + 1, sfa);
+       if (err)
+               return err;
+
+       add_nested_action_end(*sfa, st_acts);
+       add_nested_action_end(*sfa, start);
+
+       return 0;
 }
 
 static int validate_tp_port(const struct sw_flow_key *flow_key)
@@ -508,8 +608,30 @@ static int validate_tp_port(const struct sw_flow_key *flow_key)
        return -EINVAL;
 }
 
+static int validate_and_copy_set_tun(const struct nlattr *attr,
+                                    struct sw_flow_actions **sfa)
+{
+       struct ovs_key_ipv4_tunnel tun_key;
+       int err, start;
+
+       err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
+       if (err)
+               return err;
+
+       start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
+       if (start < 0)
+               return start;
+
+       err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
+       add_nested_action_end(*sfa, start);
+
+       return err;
+}
+
 static int validate_set(const struct nlattr *a,
-                       const struct sw_flow_key *flow_key)
+                       const struct sw_flow_key *flow_key,
+                       struct sw_flow_actions **sfa,
+                       bool *set_tun)
 {
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);
@@ -519,18 +641,27 @@ static int validate_set(const struct nlattr *a,
                return -EINVAL;
 
        if (key_type > OVS_KEY_ATTR_MAX ||
-           nla_len(ovs_key) != ovs_key_lens[key_type])
+          (ovs_key_lens[key_type] != nla_len(ovs_key) &&
+           ovs_key_lens[key_type] != -1))
                return -EINVAL;
 
        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;
        const struct ovs_key_ipv6 *ipv6_key;
+       int err;
 
        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_SKB_MARK:
        case OVS_KEY_ATTR_ETHERNET:
                break;
 
+       case OVS_KEY_ATTR_TUNNEL:
+               *set_tun = true;
+               err = validate_and_copy_set_tun(a, sfa);
+               if (err)
+                       return err;
+               break;
+
        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;
@@ -606,8 +737,24 @@ static int validate_userspace(const struct nlattr *attr)
        return 0;
 }
 
-static int validate_actions(const struct nlattr *attr,
-                               const struct sw_flow_key *key,  int depth)
+static int copy_action(const struct nlattr *from,
+                      struct sw_flow_actions **sfa)
+{
+       int totlen = NLA_ALIGN(from->nla_len);
+       struct nlattr *to;
+
+       to = reserve_sfa_size(sfa, from->nla_len);
+       if (IS_ERR(to))
+               return PTR_ERR(to);
+
+       memcpy(to, from, totlen);
+       return 0;
+}
+
+static int validate_and_copy_actions(const struct nlattr *attr,
+                                    const struct sw_flow_key *key,
+                                    int depth,
+                                    struct sw_flow_actions **sfa)
 {
        const struct nlattr *a;
        int rem, err;
@@ -627,12 +774,14 @@ static int validate_actions(const struct nlattr *attr,
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);
+               bool skip_copy;
 
                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;
 
+               skip_copy = false;
                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;
@@ -661,20 +810,26 @@ static int validate_actions(const struct nlattr *attr,
                        break;
 
                case OVS_ACTION_ATTR_SET:
-                       err = validate_set(a, key);
+                       err = validate_set(a, key, sfa, &skip_copy);
                        if (err)
                                return err;
                        break;
 
                case OVS_ACTION_ATTR_SAMPLE:
-                       err = validate_sample(a, key, depth);
+                       err = validate_and_copy_sample(a, key, depth, sfa);
                        if (err)
                                return err;
+                       skip_copy = true;
                        break;
 
                default:
                        return -EINVAL;
                }
+               if (!skip_copy) {
+                       err = copy_action(a, sfa);
+                       if (err)
+                               return err;
+               }
        }
 
        if (rem > 0)
@@ -739,24 +894,18 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        if (err)
                goto err_flow_free;
 
-       err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
-                                            &flow->key.phy.skb_mark,
-                                            &flow->key.phy.in_port,
-                                            a[OVS_PACKET_ATTR_KEY]);
-       if (err)
-               goto err_flow_free;
-
-       err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
+       err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;
-
-       flow->hash = ovs_flow_hash(&flow->key, key_len);
-
-       acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+       acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;
+
+       err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
        rcu_assign_pointer(flow->sf_acts, acts);
+       if (err)
+               goto err_flow_free;
 
        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;
@@ -846,6 +995,99 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
 };
 
+static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
+static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
+{
+       const struct nlattr *a;
+       struct nlattr *start;
+       int err = 0, rem;
+
+       start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
+       if (!start)
+               return -EMSGSIZE;
+
+       nla_for_each_nested(a, attr, rem) {
+               int type = nla_type(a);
+               struct nlattr *st_sample;
+
+               switch (type) {
+               case OVS_SAMPLE_ATTR_PROBABILITY:
+                       if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
+                               return -EMSGSIZE;
+                       break;
+               case OVS_SAMPLE_ATTR_ACTIONS:
+                       st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
+                       if (!st_sample)
+                               return -EMSGSIZE;
+                       err = actions_to_attr(nla_data(a), nla_len(a), skb);
+                       if (err)
+                               return err;
+                       nla_nest_end(skb, st_sample);
+                       break;
+               }
+       }
+
+       nla_nest_end(skb, start);
+       return err;
+}
+
+static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
+{
+       const struct nlattr *ovs_key = nla_data(a);
+       int key_type = nla_type(ovs_key);
+       struct nlattr *start;
+       int err;
+
+       switch (key_type) {
+       case OVS_KEY_ATTR_IPV4_TUNNEL:
+               start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
+               if (!start)
+                       return -EMSGSIZE;
+
+               err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
+               if (err)
+                       return err;
+               nla_nest_end(skb, start);
+               break;
+       default:
+               if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
+                       return -EMSGSIZE;
+               break;
+       }
+
+       return 0;
+}
+
+static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
+{
+       const struct nlattr *a;
+       int rem, err;
+
+       nla_for_each_attr(a, attr, len, rem) {
+               int type = nla_type(a);
+
+               switch (type) {
+               case OVS_ACTION_ATTR_SET:
+                       err = set_action_to_attr(a, skb);
+                       if (err)
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_SAMPLE:
+                       err = sample_action_to_attr(a, skb);
+                       if (err)
+                               return err;
+                       break;
+               default:
+                       if (nla_put(skb, type, nla_len(a), nla_data(a)))
+                               return -EMSGSIZE;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
        return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -863,6 +1105,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 {
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
+       struct nlattr *start;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
@@ -916,10 +1159,19 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
-       err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
-                     sf_acts->actions);
-       if (err < 0 && skb_orig_len)
-               goto error;
+       start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
+       if (start) {
+               err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
+               if (!err)
+                       nla_nest_end(skb, start);
+               else {
+                       if (skb_orig_len)
+                               goto error;
+
+                       nla_nest_cancel(skb, start);
+               }
+       } else if (skb_orig_len)
+               goto nla_put_failure;
 
        return genlmsg_end(skb, ovs_header);
 
@@ -964,6 +1216,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
+       struct sw_flow_actions *acts = NULL;
        int error;
        int key_len;
 
@@ -977,9 +1230,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
-               error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
-               if (error)
+               acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+               error = PTR_ERR(acts);
+               if (IS_ERR(acts))
                        goto error;
+
+               error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0, &acts);
+               if (error)
+                       goto err_kfree;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
@@ -994,8 +1252,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
        table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
-               struct sw_flow_actions *acts;
-
                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
@@ -1019,19 +1275,12 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                        error = PTR_ERR(flow);
                        goto err_unlock_ovs;
                }
-               flow->key = key;
                clear_stats(flow);
 
-               /* Obtain actions. */
-               acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
-               error = PTR_ERR(acts);
-               if (IS_ERR(acts))
-                       goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);
 
                /* Put flow in bucket. */
-               flow->hash = ovs_flow_hash(&key, key_len);
-               ovs_flow_tbl_insert(table, flow);
+               ovs_flow_tbl_insert(table, flow, &key, key_len);
 
                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq,
@@ -1039,7 +1288,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;
-               struct nlattr *acts_attrs;
 
                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
@@ -1054,21 +1302,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
-               acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
-               if (acts_attrs &&
-                  (old_acts->actions_len != nla_len(acts_attrs) ||
-                  memcmp(old_acts->actions, nla_data(acts_attrs),
-                         old_acts->actions_len))) {
-                       struct sw_flow_actions *new_acts;
-
-                       new_acts = ovs_flow_actions_alloc(acts_attrs);
-                       error = PTR_ERR(new_acts);
-                       if (IS_ERR(new_acts))
-                               goto err_unlock_ovs;
-
-                       rcu_assign_pointer(flow->sf_acts, new_acts);
-                       ovs_flow_deferred_free_acts(old_acts);
-               }
+               rcu_assign_pointer(flow->sf_acts, acts);
+               ovs_flow_deferred_free_acts(old_acts);
 
                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                               info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1089,10 +1324,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;
 
-error_free_flow:
-       ovs_flow_free(flow);
 err_unlock_ovs:
        ovs_unlock();
+err_kfree:
+       kfree(acts);
 error:
        return error;
 }
@@ -1812,10 +2047,11 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        if (IS_ERR(vport))
                goto exit_unlock;
 
-       err = 0;
        if (a[OVS_VPORT_ATTR_TYPE] &&
-           nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
+           nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
                err = -EINVAL;
+               goto exit_unlock;
+       }
 
        reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!reply) {
@@ -1823,10 +2059,11 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock;
        }
 
-       if (!err && a[OVS_VPORT_ATTR_OPTIONS])
+       if (a[OVS_VPORT_ATTR_OPTIONS]) {
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
-       if (err)
-               goto exit_free;
+               if (err)
+                       goto exit_free;
+       }
 
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
@@ -1867,8 +2104,8 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock;
        }
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-                                        OVS_VPORT_CMD_DEL);
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
+                                        info->snd_seq, OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;
@@ -1897,8 +2134,8 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (IS_ERR(vport))
                goto exit_unlock;
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-                                        OVS_VPORT_CMD_NEW);
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
+                                        info->snd_seq, OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;
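
Aside, not part of the patch: ovs_flow_cmd_fill_info() above now serializes the actions through the standard nest-or-cancel netlink idiom rather than one flat nla_put(). The idiom in isolation, with a hypothetical attribute type and payload:

#include <net/netlink.h>

static int put_example_nest(struct sk_buff *skb, int attrtype)
{
        struct nlattr *start = nla_nest_start(skb, attrtype);

        if (!start)
                return -EMSGSIZE;

        if (nla_put_u32(skb, 1 /* hypothetical nested type */, 42)) {
                nla_nest_cancel(skb, start);    /* roll the partial nest back */
                return -EMSGSIZE;
        }

        nla_nest_end(skb, start);               /* patch up the outer length */
        return 0;
}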
index 16b8406952164e4c264a859f23f53e3bbe300740..a91486484916b4e5aa69bd82ee95d4a76cc25ce3 100644 (file)
@@ -88,9 +88,12 @@ struct datapath {
 /**
  * struct ovs_skb_cb - OVS data in skb CB
  * @flow: The flow associated with this packet.  May be %NULL if no flow.
+ * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the
+ * packet is not being tunneled.
  */
 struct ovs_skb_cb {
        struct sw_flow          *flow;
+       struct ovs_key_ipv4_tunnel  *tun_key;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
 
@@ -119,6 +122,7 @@ struct dp_upcall_info {
 struct ovs_net {
        struct list_head dps;
        struct work_struct dp_notify_work;
+       struct vport_net vport_net;
 };
 
 extern int ovs_net_id;
index ef4feec6cd845e67903706f448ccc5d1a8f96556..c3235675f35997169ad4961722344537005a8f6e 100644 (file)
@@ -78,7 +78,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
                           void *ptr)
 {
        struct ovs_net *ovs_net;
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct vport *vport = NULL;
 
        if (!ovs_is_internal_dev(dev))
index b15321a2228c879a6a2bc56e950e7335be2a9507..5c519b121e1be0ba26c40e136ace7b8eba42f62e 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/icmpv6.h>
 #include <linux/rculist.h>
 #include <net/ip.h>
+#include <net/ip_tunnels.h>
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 
@@ -198,20 +199,18 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
        spin_unlock(&flow->lock);
 }
 
-struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
+struct sw_flow_actions *ovs_flow_actions_alloc(int size)
 {
-       int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;
 
-       if (actions_len > MAX_ACTIONS_BUFSIZE)
+       if (size > MAX_ACTIONS_BUFSIZE)
                return ERR_PTR(-EINVAL);
 
-       sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
+       sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
        if (!sfa)
                return ERR_PTR(-ENOMEM);
 
-       sfa->actions_len = actions_len;
-       nla_memcpy(sfa->actions, actions, actions_len);
+       sfa->actions_len = 0;
        return sfa;
 }
 
@@ -354,6 +353,14 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
        return NULL;
 }
 
+static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+       struct hlist_head *head;
+       head = find_bucket(table, flow->hash);
+       hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+       table->count++;
+}
+
 static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
 {
        int old_ver;
@@ -370,7 +377,7 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
                head = flex_array_get(old->buckets, i);
 
                hlist_for_each_entry(flow, head, hash_node[old_ver])
-                       ovs_flow_tbl_insert(new, flow);
+                       __flow_tbl_insert(new, flow);
        }
        old->keep_flows = true;
 }
@@ -590,10 +597,10 @@ out:
  *    - skb->network_header: just past the Ethernet header, or just past the
  *      VLAN header, to the first byte of the Ethernet payload.
  *
- *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
  *      on output, then just past the IP header, if one is present and
  *      of a correct length, otherwise the same as skb->network_header.
- *      For other key->dl_type values it is left untouched.
+ *      For other key->eth.type values it is left untouched.
  */
 int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
                 int *key_lenp)
@@ -605,6 +612,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
        memset(key, 0, sizeof(*key));
 
        key->phy.priority = skb->priority;
+       if (OVS_CB(skb)->tun_key)
+               memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
        key->phy.in_port = in_port;
        key->phy.skb_mark = skb->mark;
 
@@ -618,6 +627,9 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
        memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
 
        __skb_pull(skb, 2 * ETH_ALEN);
+       /* We are going to push all headers that we pull, so no need to
+        * update skb->csum here.
+        */
 
        if (vlan_tx_tag_present(skb))
                key->eth.tci = htons(skb->vlan_tci);
@@ -759,9 +771,18 @@ out:
        return error;
 }
 
-u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
+static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
+{
+       return jhash2((u32 *)((u8 *)key + key_start),
+                     DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
+}
+
+static int flow_key_start(struct sw_flow_key *key)
 {
-       return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
+       if (key->tun_key.ipv4_dst)
+               return 0;
+       else
+               return offsetof(struct sw_flow_key, phy);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
@@ -769,28 +790,31 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
 {
        struct sw_flow *flow;
        struct hlist_head *head;
+       u8 *_key;
+       int key_start;
        u32 hash;
 
-       hash = ovs_flow_hash(key, key_len);
+       key_start = flow_key_start(key);
+       hash = ovs_flow_hash(key, key_start, key_len);
 
+       _key = (u8 *) key + key_start;
        head = find_bucket(table, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
 
                if (flow->hash == hash &&
-                   !memcmp(&flow->key, key, key_len)) {
+                   !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {
                        return flow;
                }
        }
        return NULL;
 }
 
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+                        struct sw_flow_key *key, int key_len)
 {
-       struct hlist_head *head;
-
-       head = find_bucket(table, flow->hash);
-       hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
-       table->count++;
+       flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
+       memcpy(&flow->key, key, sizeof(flow->key));
+       __flow_tbl_insert(table, flow);
 }
 
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
@@ -817,6 +841,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
        [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
        [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+       [OVS_KEY_ATTR_TUNNEL] = -1,
 };
 
 static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
@@ -954,6 +979,105 @@ static int parse_flow_nlattrs(const struct nlattr *attr,
        return 0;
 }
 
+int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
+                            struct ovs_key_ipv4_tunnel *tun_key)
+{
+       struct nlattr *a;
+       int rem;
+       bool ttl = false;
+
+       memset(tun_key, 0, sizeof(*tun_key));
+
+       nla_for_each_nested(a, attr, rem) {
+               int type = nla_type(a);
+               static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
+                       [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
+                       [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
+                       [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
+                       [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
+                       [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
+                       [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
+                       [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
+               };
+
+               if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
+                       ovs_tunnel_key_lens[type] != nla_len(a))
+                       return -EINVAL;
+
+               switch (type) {
+               case OVS_TUNNEL_KEY_ATTR_ID:
+                       tun_key->tun_id = nla_get_be64(a);
+                       tun_key->tun_flags |= TUNNEL_KEY;
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
+                       tun_key->ipv4_src = nla_get_be32(a);
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
+                       tun_key->ipv4_dst = nla_get_be32(a);
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_TOS:
+                       tun_key->ipv4_tos = nla_get_u8(a);
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_TTL:
+                       tun_key->ipv4_ttl = nla_get_u8(a);
+                       ttl = true;
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
+                       tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT;
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_CSUM:
+                       tun_key->tun_flags |= TUNNEL_CSUM;
+                       break;
+               default:
+                       return -EINVAL;
+
+               }
+       }
+       if (rem > 0)
+               return -EINVAL;
+
+       if (!tun_key->ipv4_dst)
+               return -EINVAL;
+
+       if (!ttl)
+               return -EINVAL;
+
+       return 0;
+}
+
+int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
+                          const struct ovs_key_ipv4_tunnel *tun_key)
+{
+       struct nlattr *nla;
+
+       nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
+       if (!nla)
+               return -EMSGSIZE;
+
+       if (tun_key->tun_flags & TUNNEL_KEY &&
+           nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id))
+               return -EMSGSIZE;
+       if (tun_key->ipv4_src &&
+           nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src))
+               return -EMSGSIZE;
+       if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst))
+               return -EMSGSIZE;
+       if (tun_key->ipv4_tos &&
+           nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos))
+               return -EMSGSIZE;
+       if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl))
+               return -EMSGSIZE;
+       if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+               nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
+               return -EMSGSIZE;
+       if ((tun_key->tun_flags & TUNNEL_CSUM) &&
+               nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
+               return -EMSGSIZE;
+
+       nla_nest_end(skb, nla);
+       return 0;
+}
+
 /**
  * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
  * @swkey: receives the extracted flow key.
@@ -996,6 +1120,14 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
        }
 
+       if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
+               err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key);
+               if (err)
+                       return err;
+
+               attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
+       }
+
        /* Data attributes. */
        if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
                return -EINVAL;
@@ -1122,10 +1254,9 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
 
 /**
  * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
- * @priority: receives the skb priority
- * @mark: receives the skb mark
- * @in_port: receives the extracted input port.
- * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
+ * @key_len: Length of key in @flow.  Used for calculating flow hash.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
  * sequence.
  *
  * This parses a series of Netlink attributes that form a flow key, which must
@@ -1133,42 +1264,56 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
  * get the metadata, that is, the parts of the flow key that cannot be
  * extracted from the packet itself.
  */
-int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port,
-                              const struct nlattr *attr)
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
+                                  const struct nlattr *attr)
 {
+       struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
        const struct nlattr *nla;
        int rem;
 
-       *in_port = DP_MAX_PORTS;
-       *priority = 0;
-       *mark = 0;
+       flow->key.phy.in_port = DP_MAX_PORTS;
+       flow->key.phy.priority = 0;
+       flow->key.phy.skb_mark = 0;
+       memset(tun_key, 0, sizeof(flow->key.tun_key));
 
        nla_for_each_nested(nla, attr, rem) {
                int type = nla_type(nla);
 
                if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
+                       int err;
+
                        if (nla_len(nla) != ovs_key_lens[type])
                                return -EINVAL;
 
                        switch (type) {
                        case OVS_KEY_ATTR_PRIORITY:
-                               *priority = nla_get_u32(nla);
+                               flow->key.phy.priority = nla_get_u32(nla);
+                               break;
+
+                       case OVS_KEY_ATTR_TUNNEL:
+                               err = ovs_ipv4_tun_from_nlattr(nla, tun_key);
+                               if (err)
+                                       return err;
                                break;
 
                        case OVS_KEY_ATTR_IN_PORT:
                                if (nla_get_u32(nla) >= DP_MAX_PORTS)
                                        return -EINVAL;
-                               *in_port = nla_get_u32(nla);
+                               flow->key.phy.in_port = nla_get_u32(nla);
                                break;
 
                        case OVS_KEY_ATTR_SKB_MARK:
-                               *mark = nla_get_u32(nla);
+                               flow->key.phy.skb_mark = nla_get_u32(nla);
                                break;
                        }
                }
        }
        if (rem)
                return -EINVAL;
+
+       flow->hash = ovs_flow_hash(&flow->key,
+                                  flow_key_start(&flow->key), key_len);
+
        return 0;
 }
 
@@ -1181,6 +1326,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
            nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
                goto nla_put_failure;
 
+       if (swkey->tun_key.ipv4_dst &&
+           ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key))
+               goto nla_put_failure;
+
        if (swkey->phy.in_port != DP_MAX_PORTS &&
            nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
                goto nla_put_failure;
index 0875fde65b9cc21815b725ac84d579efbd64a2da..66ef7220293eab573f176d70878f7d6698e68a13 100644 (file)
@@ -40,7 +40,38 @@ struct sw_flow_actions {
        struct nlattr actions[];
 };
 
+/* Used to memset ovs_key_ipv4_tunnel padding. */
+#define OVS_TUNNEL_KEY_SIZE                                    \
+       (offsetof(struct ovs_key_ipv4_tunnel, ipv4_ttl) +       \
+       FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, ipv4_ttl))
+
+struct ovs_key_ipv4_tunnel {
+       __be64 tun_id;
+       __be32 ipv4_src;
+       __be32 ipv4_dst;
+       __be16 tun_flags;
+       u8   ipv4_tos;
+       u8   ipv4_ttl;
+};
+
+static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
+                                        const struct iphdr *iph, __be64 tun_id,
+                                        __be16 tun_flags)
+{
+       tun_key->tun_id = tun_id;
+       tun_key->ipv4_src = iph->saddr;
+       tun_key->ipv4_dst = iph->daddr;
+       tun_key->ipv4_tos = iph->tos;
+       tun_key->ipv4_ttl = iph->ttl;
+       tun_key->tun_flags = tun_flags;
+
+       /* clear struct padding. */
+       memset((unsigned char *) tun_key + OVS_TUNNEL_KEY_SIZE, 0,
+              sizeof(*tun_key) - OVS_TUNNEL_KEY_SIZE);
+}
+
 struct sw_flow_key {
+       struct ovs_key_ipv4_tunnel tun_key;  /* Encapsulating tunnel key. */
        struct {
                u32     priority;       /* Packet QoS priority. */
                u32     skb_mark;       /* SKB mark. */
@@ -130,7 +161,7 @@ struct sw_flow *ovs_flow_alloc(void);
 void ovs_flow_deferred_free(struct sw_flow *);
 void ovs_flow_free(struct sw_flow *flow);
 
-struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len);
 void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
 
 int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
@@ -141,10 +172,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies);
 int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
 int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                      const struct nlattr *);
-int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port,
-                              const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
+                                 const struct nlattr *attr);
 
-#define MAX_ACTIONS_BUFSIZE    (16 * 1024)
+#define MAX_ACTIONS_BUFSIZE    (32 * 1024)
 #define TBL_MIN_BUCKETS                1024
 
 struct flow_table {
@@ -173,11 +204,15 @@ void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
 struct flow_table *ovs_flow_tbl_alloc(int new_size);
 struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
 struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+                        struct sw_flow_key *key, int key_len);
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
-u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
 
 struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
 extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
+int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
+                        struct ovs_key_ipv4_tunnel *tun_key);
+int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
+                       const struct ovs_key_ipv4_tunnel *tun_key);
 
 #endif /* flow.h */
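
Aside, not part of the patch: OVS_TUNNEL_KEY_SIZE exists because struct ovs_key_ipv4_tunnel ends on a u8 yet is padded out to the alignment of its leading __be64, and an uninitialized tail would otherwise leak into the memcmp()/jhash2()-based flow lookups. A userspace sketch that mirrors the layout (not the kernel types) to show the arithmetic:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ipv4_tunnel_key {                /* same field order as the kernel struct */
        uint64_t tun_id;
        uint32_t ipv4_src;
        uint32_t ipv4_dst;
        uint16_t tun_flags;
        uint8_t  ipv4_tos;
        uint8_t  ipv4_ttl;
};

int main(void)
{
        size_t fields_end = offsetof(struct ipv4_tunnel_key, ipv4_ttl) +
                            sizeof(uint8_t);

        /* On a typical LP64 ABI this prints "20 of 24": the four trailing
         * padding bytes are exactly what ovs_flow_tun_key_init() memsets. */
        printf("%zu of %zu bytes are real fields\n",
               fields_end, sizeof(struct ipv4_tunnel_key));
        return 0;
}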
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
new file mode 100644 (file)
index 0000000..943e5c4
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/if.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/in_route.h>
+#include <linux/inetdevice.h>
+#include <linux/jhash.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/rculist.h>
+#include <net/route.h>
+#include <net/xfrm.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/gre.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/protocol.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 be64_get_low32(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+       return (__force __be32)x;
+#else
+       return (__force __be32)((__force u64)x >> 32);
+#endif
+}
+
+static __be16 filter_tnl_flags(__be16 flags)
+{
+       return flags & (TUNNEL_CSUM | TUNNEL_KEY);
+}
+
+static struct sk_buff *__build_header(struct sk_buff *skb,
+                                     int tunnel_hlen)
+{
+       const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
+       struct tnl_ptk_info tpi;
+
+       skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
+       if (IS_ERR(skb))
+               return NULL;
+
+       tpi.flags = filter_tnl_flags(tun_key->tun_flags);
+       tpi.proto = htons(ETH_P_TEB);
+       tpi.key = be64_get_low32(tun_key->tun_id);
+       tpi.seq = 0;
+       gre_build_header(skb, &tpi, tunnel_hlen);
+
+       return skb;
+}
+
+static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
+{
+#ifdef __BIG_ENDIAN
+       return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
+#else
+       return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
+#endif
+}
+
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_rcv(struct sk_buff *skb,
+                  const struct tnl_ptk_info *tpi)
+{
+       struct ovs_key_ipv4_tunnel tun_key;
+       struct ovs_net *ovs_net;
+       struct vport *vport;
+       __be64 key;
+
+       ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+       vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+       if (unlikely(!vport))
+               return PACKET_REJECT;
+
+       key = key_to_tunnel_id(tpi->key, tpi->seq);
+       ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
+                             filter_tnl_flags(tpi->flags));
+
+       ovs_vport_receive(vport, skb, &tun_key);
+       return PACKET_RCVD;
+}
+
+static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+       struct net *net = ovs_dp_get_net(vport->dp);
+       struct flowi4 fl;
+       struct rtable *rt;
+       int min_headroom;
+       int tunnel_hlen;
+       __be16 df;
+       int err;
+
+       if (unlikely(!OVS_CB(skb)->tun_key)) {
+               err = -EINVAL;
+               goto error;
+       }
+
+       /* Route lookup */
+       memset(&fl, 0, sizeof(fl));
+       fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
+       fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
+       fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+       fl.flowi4_mark = skb->mark;
+       fl.flowi4_proto = IPPROTO_GRE;
+
+       rt = ip_route_output_key(net, &fl);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+
+       tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);
+
+       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+                       + tunnel_hlen + sizeof(struct iphdr)
+                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+       if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+               int head_delta = SKB_DATA_ALIGN(min_headroom -
+                                               skb_headroom(skb) +
+                                               16);
+               err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+                                       0, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto err_free_rt;
+       }
+
+       if (vlan_tx_tag_present(skb)) {
+               if (unlikely(!__vlan_put_tag(skb,
+                                            skb->vlan_proto,
+                                            vlan_tx_tag_get(skb)))) {
+                       err = -ENOMEM;
+                       goto err_free_rt;
+               }
+               skb->vlan_tci = 0;
+       }
+
+       /* Push Tunnel header. */
+       skb = __build_header(skb, tunnel_hlen);
+       if (unlikely(!skb)) {
+               err = 0;
+               goto err_free_rt;
+       }
+
+       df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+               htons(IP_DF) : 0;
+
+       skb->local_df = 1;
+
+       return iptunnel_xmit(net, rt, skb, fl.saddr,
+                            OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
+                            OVS_CB(skb)->tun_key->ipv4_tos,
+                            OVS_CB(skb)->tun_key->ipv4_ttl, df);
+err_free_rt:
+       ip_rt_put(rt);
+error:
+       return err;
+}
+
+static struct gre_cisco_protocol gre_protocol = {
+       .handler        = gre_rcv,
+       .priority       = 1,
+};
+
+static int gre_ports;
+static int gre_init(void)
+{
+       int err;
+
+       gre_ports++;
+       if (gre_ports > 1)
+               return 0;
+
+       err = gre_cisco_register(&gre_protocol);
+       if (err)
+               pr_warn("cannot register gre protocol handler\n");
+
+       return err;
+}
+
+static void gre_exit(void)
+{
+       gre_ports--;
+       if (gre_ports > 0)
+               return;
+
+       gre_cisco_unregister(&gre_protocol);
+}
+
+static const char *gre_get_name(const struct vport *vport)
+{
+       return vport_priv(vport);
+}
+
+static struct vport *gre_create(const struct vport_parms *parms)
+{
+       struct net *net = ovs_dp_get_net(parms->dp);
+       struct ovs_net *ovs_net;
+       struct vport *vport;
+       int err;
+
+       err = gre_init();
+       if (err)
+               return ERR_PTR(err);
+
+       ovs_net = net_generic(net, ovs_net_id);
+       if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
+               vport = ERR_PTR(-EEXIST);
+               goto error;
+       }
+
+       vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
+       if (IS_ERR(vport))
+               goto error;
+
+       strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
+       rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
+       return vport;
+
+error:
+       gre_exit();
+       return vport;
+}
+
+static void gre_tnl_destroy(struct vport *vport)
+{
+       struct net *net = ovs_dp_get_net(vport->dp);
+       struct ovs_net *ovs_net;
+
+       ovs_net = net_generic(net, ovs_net_id);
+
+       rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
+       ovs_vport_deferred_free(vport);
+       gre_exit();
+}
+
+const struct vport_ops ovs_gre_vport_ops = {
+       .type           = OVS_VPORT_TYPE_GRE,
+       .create         = gre_create,
+       .destroy        = gre_tnl_destroy,
+       .get_name       = gre_get_name,
+       .send           = gre_tnl_send,
+};
+#endif
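
be64_get_low32() and key_to_tunnel_id() above pack the 32-bit GRE key (plus the optional sequence number) into the 64-bit OVS tunnel ID while preserving network byte order. A stand-alone user-space sketch of the little-endian branch of that arithmetic; the helper names and the sample key are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Little-endian variants of the helpers from vport-gre.c; on a
 * big-endian host the shifts move to the other half, as in the
 * kernel's #ifdef __BIG_ENDIAN branches. */
static uint32_t sketch_be64_get_low32(uint64_t x)
{
	return (uint32_t)(x >> 32);
}

static uint64_t sketch_key_to_tunnel_id(uint32_t key, uint32_t seq)
{
	return (uint64_t)key << 32 | seq;
}

int main(void)
{
	/* GRE key 0x12345678 as its raw __be32 value appears on a
	 * little-endian host. */
	uint32_t key = 0x78563412;

	/* With no sequence number, the round trip recovers the key. */
	printf("round trip ok: %d\n",
	       sketch_be64_get_low32(sketch_key_to_tunnel_id(key, 0)) == key);
	return 0;
}
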
index 84e0a03791867449247cc0c161ab434f9a6e5c15..98d3edbbc2356e1296fcc86b59956387db916ca4 100644 (file)
@@ -67,7 +67,7 @@ static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netde
 static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        rcu_read_lock();
-       ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
+       ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
        rcu_read_unlock();
        return 0;
 }
@@ -221,6 +221,7 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
        skb->dev = netdev;
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, netdev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
        netif_rx(skb);
 
index 4f01c6d2ffa40dcdb92856d6b35782a43679ad66..5982f3f62835359765c3b1f822f69053e2da4b90 100644 (file)
@@ -49,7 +49,9 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
                return;
 
        skb_push(skb, ETH_HLEN);
-       ovs_vport_receive(vport, skb);
+       ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
+
+       ovs_vport_receive(vport, skb, NULL);
        return;
 
 error:
@@ -170,7 +172,7 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
                net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
                                     netdev_vport->dev->name,
                                     packet_length(skb), mtu);
-               goto error;
+               goto drop;
        }
 
        skb->dev = netdev_vport->dev;
@@ -179,9 +181,8 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
 
        return len;
 
-error:
+drop:
        kfree_skb(skb);
-       ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
        return 0;
 }
 
index a3cb3a32cd77140369a448337120c79f8a040ab0..dd298b5c5cdb25e1a721e5b845353ce055c02e60 100644 (file)
@@ -39,6 +39,5 @@ netdev_vport_priv(const struct vport *vport)
 }
 
 const char *ovs_netdev_get_name(const struct vport *);
-const char *ovs_netdev_get_config(const struct vport *);
 
 #endif /* vport_netdev.h */
index 720623190eaa82d9ad4a2fcf460a0270cf57a03b..ba81294219ac887fab0fb337b084a304f961fe32 100644 (file)
 static const struct vport_ops *vport_ops_list[] = {
        &ovs_netdev_vport_ops,
        &ovs_internal_vport_ops,
+
+#if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
+       &ovs_gre_vport_ops,
+#endif
 };
 
 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -325,7 +329,8 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  * Must be called with rcu_read_lock.  The packet cannot be shared and
  * skb->data should point to the Ethernet header.
  */
-void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
+                      struct ovs_key_ipv4_tunnel *tun_key)
 {
        struct pcpu_tstats *stats;
 
@@ -335,6 +340,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);
 
+       OVS_CB(skb)->tun_key = tun_key;
        ovs_dp_process_received_packet(vport, skb);
 }
 
@@ -351,7 +357,7 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
 {
        int sent = vport->ops->send(vport, skb);
 
-       if (likely(sent)) {
+       if (likely(sent > 0)) {
                struct pcpu_tstats *stats;
 
                stats = this_cpu_ptr(vport->percpu_stats);
@@ -360,7 +366,12 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
                stats->tx_packets++;
                stats->tx_bytes += sent;
                u64_stats_update_end(&stats->syncp);
-       }
+       } else if (sent < 0) {
+               ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
+               kfree_skb(skb);
+       } else
+               ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+
        return sent;
 }
 
@@ -371,7 +382,7 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
  * @err_type: one of enum vport_err_type types to indicate the error type
  *
  * If using the vport generic stats layer indicate that an error of the given
- * type has occured.
+ * type has occurred.
  */
 void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
 {
@@ -397,3 +408,18 @@ void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
 
        spin_unlock(&vport->stats_lock);
 }
+
+static void free_vport_rcu(struct rcu_head *rcu)
+{
+       struct vport *vport = container_of(rcu, struct vport, rcu);
+
+       ovs_vport_free(vport);
+}
+
+void ovs_vport_deferred_free(struct vport *vport)
+{
+       if (!vport)
+               return;
+
+       call_rcu(&vport->rcu, free_vport_rcu);
+}
index 68a377bc0841e7bfd0a5b1200a4a3b938adc9251..376045c42f8b43ba156acb2fd723398b108208b6 100644 (file)
@@ -34,6 +34,11 @@ struct vport_parms;
 
 /* The following definitions are for users of the vport subsystem: */
 
+struct vport_net {
+       struct vport __rcu *gre_vport;
+};
+
 int ovs_vport_init(void);
 void ovs_vport_exit(void);
 
@@ -123,9 +128,8 @@ struct vport_parms {
  * existing vport to a &struct sk_buff.  May be %NULL for a vport that does not
  * have any configuration.
  * @get_name: Get the device's name.
- * @get_config: Get the device's configuration.
- * May be null if the device does not have an ifindex.
- * @send: Send a packet on the device.  Returns the length of the packet sent.
+ * @send: Send a packet on the device.  Returns the length of the packet sent,
+ * zero for dropped packets or negative for error.
  */
 struct vport_ops {
        enum ovs_vport_type type;
@@ -139,7 +143,6 @@ struct vport_ops {
 
        /* Called with rcu_read_lock or ovs_mutex. */
        const char *(*get_name)(const struct vport *);
-       void (*get_config)(const struct vport *, void *);
 
        int (*send)(struct vport *, struct sk_buff *);
 };
@@ -154,6 +157,7 @@ enum vport_err_type {
 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
                              const struct vport_parms *);
 void ovs_vport_free(struct vport *);
+void ovs_vport_deferred_free(struct vport *vport);
 
 #define VPORT_ALIGN 8
 
@@ -186,12 +190,21 @@ static inline struct vport *vport_from_priv(const void *priv)
        return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
 }
 
-void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_receive(struct vport *, struct sk_buff *,
+                      struct ovs_key_ipv4_tunnel *);
 void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
 
 /* List of statically compiled vport implementations.  Don't forget to also
  * add yours to the list at the top of vport.c. */
 extern const struct vport_ops ovs_netdev_vport_ops;
 extern const struct vport_ops ovs_internal_vport_ops;
+extern const struct vport_ops ovs_gre_vport_ops;
+
+static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
+                                     const void *start, unsigned int len)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+}
 
 #endif /* vport.h */
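
The @send documentation just above matches the reworked ovs_vport_send() in vport.c: a positive return is the transmitted length, zero means the implementation already dropped (and freed) the packet, and a negative errno tells the caller to free the skb and record VPORT_E_TX_ERROR. A hypothetical sketch of an op honouring that contract; packet_too_big() and hw_xmit() are invented stand-ins, not OVS or kernel APIs:

/* Hypothetical sketch only; hw_xmit() is assumed to leave the skb
 * untouched when it fails. */
static int sketch_send(struct vport *vport, struct sk_buff *skb)
{
	int len = skb->len;

	if (packet_too_big(vport, skb)) {
		kfree_skb(skb);		/* drop path consumes the skb ... */
		return 0;		/* ... caller records VPORT_E_TX_DROPPED */
	}

	if (hw_xmit(vport, skb) < 0)
		return -ENOBUFS;	/* caller frees skb, records VPORT_E_TX_ERROR */

	return len;			/* caller adds len to the tx byte counter */
}
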
index 8ec1bca7f85908c22534eb2023fef135859997c3..4b66c752eae5d99b2bfe5fa7832a1301d20519ef 100644 (file)
@@ -2851,12 +2851,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
                return -EOPNOTSUPP;
 
        uaddr->sa_family = AF_PACKET;
+       memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
        rcu_read_lock();
        dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
        if (dev)
-               strncpy(uaddr->sa_data, dev->name, 14);
-       else
-               memset(uaddr->sa_data, 0, 14);
+               strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
        rcu_read_unlock();
        *uaddr_len = sizeof(*uaddr);
 
@@ -3331,10 +3330,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 }
 
 
-static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
+static int packet_notifier(struct notifier_block *this,
+                          unsigned long msg, void *ptr)
 {
        struct sock *sk;
-       struct net_device *dev = data;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
 
        rcu_read_lock();
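
packet_notifier() above, like the phonet, rose and act_mirred hunks further down, is converted to the new netdev notifier convention in this series: ptr now carries a struct netdev_notifier_info, and netdev_notifier_info_to_dev() extracts the net_device instead of casting ptr directly. A minimal sketch of a callback written against that convention (the names are made up):

static int sketch_device_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER)
		pr_info("%s: device is going away\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block sketch_notifier = {
	.notifier_call = sketch_device_event,
};

Registration is unchanged: register_netdevice_notifier(&sketch_notifier) hooks it up as before; only the ptr handling inside the callback differs.
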
index 45a7df6575ded371ef75fc11fc41a2b48371e51e..56a6146ac94bd331502294244ee2daa637f2054b 100644 (file)
@@ -292,9 +292,9 @@ static void phonet_route_autodel(struct net_device *dev)
 
 /* notify Phonet of device events */
 static int phonet_device_notify(struct notifier_block *me, unsigned long what,
-                               void *arg)
+                               void *ptr)
 {
-       struct net_device *dev = arg;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        switch (what) {
        case NETDEV_REGISTER:
index d6bbbbd0af182352b41111ce9a2eefb22ef9be85..c02a8c4bc11fd319e08608315a3bec864a462c5a 100644 (file)
@@ -61,13 +61,13 @@ void phonet_get_local_port_range(int *min, int *max)
        } while (read_seqretry(&local_port_range_lock, seq));
 }
 
-static int proc_local_port_range(ctl_table *table, int write,
+static int proc_local_port_range(struct ctl_table *table, int write,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
 {
        int ret;
        int range[2] = {local_port_range[0], local_port_range[1]};
-       ctl_table tmp = {
+       struct ctl_table tmp = {
                .data = &range,
                .maxlen = sizeof(range),
                .mode = table->mode,
index 7e643bafb4afce715c00b0628868ffed71060431..e4e41b3afce7134119dfebd1b22bf8464492a663 100644 (file)
@@ -61,7 +61,7 @@ static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
  */
 unsigned int rds_ib_sysctl_flow_control = 0;
 
-static ctl_table rds_ib_sysctl_table[] = {
+static struct ctl_table rds_ib_sysctl_table[] = {
        {
                .procname       = "max_send_wr",
                .data           = &rds_ib_sysctl_max_send_wr,
index 5d5ebd576f3f65bcd3c399a43b99f92f5e99f5d3..89c91515ed0c605b6ee63996d0c0a9692ea8fe91 100644 (file)
@@ -55,7 +55,7 @@ static unsigned long rds_iw_sysctl_max_unsig_bytes_max = ~0UL;
 
 unsigned int rds_iw_sysctl_flow_control = 1;
 
-static ctl_table rds_iw_sysctl_table[] = {
+static struct ctl_table rds_iw_sysctl_table[] = {
        {
                .procname       = "max_send_wr",
                .data           = &rds_iw_sysctl_max_send_wr,
index 907214b4c4d071444dbb67ae3cb4559fe3a8267a..b5cb2aa08f33aa62ac5bff73684fc228f74dce7e 100644 (file)
@@ -49,7 +49,7 @@ unsigned int  rds_sysctl_max_unacked_bytes = (16 << 20);
 
 unsigned int rds_sysctl_ping_enable = 1;
 
-static ctl_table rds_sysctl_rds_table[] = {
+static struct ctl_table rds_sysctl_rds_table[] = {
        {
                .procname       = "reconnect_min_delay_ms",
                .data           = &rds_sysctl_reconnect_min_jiffies,
index 9c834745159786e5e1d4338f51860be00943a0fd..e98fcfbe6007919d6c114708cde848a649bb2e4e 100644 (file)
@@ -202,10 +202,10 @@ static void rose_kill_by_device(struct net_device *dev)
 /*
  *     Handle device status changes.
  */
-static int rose_device_event(struct notifier_block *this, unsigned long event,
-       void *ptr)
+static int rose_device_event(struct notifier_block *this,
+                            unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
index 94ca9c2ccd692d2a278615221d60abc0ed3037ef..89a9278795a9fc7e2177946bb52c25a205d66956 100644 (file)
@@ -24,7 +24,7 @@ static int min_window[] = {1}, max_window[] = {7};
 
 static struct ctl_table_header *rose_table_header;
 
-static ctl_table rose_table[] = {
+static struct ctl_table rose_table[] = {
        {
                .procname       = "restart_request_timeout",
                .data           = &sysctl_rose_restart_request_timeout,
index 5d676edc22a66010ee93eea8b4663a45f60b4c75..977c10e0631b6dfe4ead45af617c0ad93c4a0759 100644 (file)
@@ -243,7 +243,7 @@ nla_put_failure:
 static int mirred_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;
 
        if (event == NETDEV_UNREGISTER)
index 823463adbd21fe9c46b87fc20d1e2393e8d5e072..189e3c5b3d098a0d224a8fb2a04b1bd76e47b3ac 100644 (file)
@@ -231,14 +231,14 @@ override:
        }
        if (R_tab) {
                police->rate_present = true;
-               psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
+               psched_ratecfg_precompute(&police->rate, &R_tab->rate);
                qdisc_put_rtab(R_tab);
        } else {
                police->rate_present = false;
        }
        if (P_tab) {
                police->peak_present = true;
-               psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
+               psched_ratecfg_precompute(&police->peak, &P_tab->rate);
                qdisc_put_rtab(P_tab);
        } else {
                police->peak_present = false;
@@ -376,9 +376,9 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        };
 
        if (police->rate_present)
-               opt.rate.rate = psched_ratecfg_getrate(&police->rate);
+               psched_ratecfg_getrate(&opt.rate, &police->rate);
        if (police->peak_present)
-               opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
+               psched_ratecfg_getrate(&opt.peakrate, &police->peak);
        if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
                goto nla_put_failure;
        if (police->tcfp_result &&
index 2b935e7cfe7b7bbb78c3cc914a39929f1db766a8..281c1bded1f60f94934e406c9cafe076e4e2706d 100644 (file)
@@ -291,17 +291,18 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
 {
        struct qdisc_rate_table *rtab;
 
+       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+           nla_len(tab) != TC_RTAB_SIZE)
+               return NULL;
+
        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
-               if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
+               if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
+                   !memcmp(&rtab->data, nla_data(tab), 1024)) {
                        rtab->refcnt++;
                        return rtab;
                }
        }
 
-       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-           nla_len(tab) != TC_RTAB_SIZE)
-               return NULL;
-
        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
        if (rtab) {
                rtab->rate = *r;
index 1bc210ffcba2a524750b3382d444a87db2c08c2c..71a568862557c26cb9fd97bfbd26531b55ad4283 100644 (file)
@@ -130,7 +130,7 @@ struct cbq_class {
        psched_time_t           penalized;
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
-       struct gnet_stats_rate_est rate_est;
+       struct gnet_stats_rate_est64 rate_est;
        struct tc_cbq_xstats    xstats;
 
        struct tcf_proto        *filter_list;
index 759b308d1a8d2ac427893b1258d4ff508e963963..8302717ea3034247bc27597e067ac8771dc973b5 100644 (file)
@@ -25,7 +25,7 @@ struct drr_class {
 
        struct gnet_stats_basic_packed          bstats;
        struct gnet_stats_queue         qstats;
-       struct gnet_stats_rate_est      rate_est;
+       struct gnet_stats_rate_est64    rate_est;
        struct list_head                alist;
        struct Qdisc                    *qdisc;
 
index eac7e0ee23c18708354e3cef8c237d289b81235f..4626cef4b76ea631e32d082d34828ffd2e465459 100644 (file)
@@ -898,38 +898,36 @@ void dev_shutdown(struct net_device *dev)
        WARN_ON(timer_pending(&dev->watchdog_timer));
 }
 
-void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+                              const struct tc_ratespec *conf)
 {
-       u64 factor;
-       u64 mult;
-       int shift;
-
-       r->rate_bps = (u64)rate << 3;
-       r->shift = 0;
+       memset(r, 0, sizeof(*r));
+       r->overhead = conf->overhead;
+       r->rate_bytes_ps = conf->rate;
        r->mult = 1;
        /*
-        * Calibrate mult, shift so that token counting is accurate
-        * for smallest packet size (64 bytes).  Token (time in ns) is
-        * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps.  It will
-        * work as long as the smallest packet transfer time can be
-        * accurately represented in nanosec.
+        * The deal here is to replace a divide by a reciprocal one
+        * in fast path (a reciprocal divide is a multiply and a shift)
+        *
+        * Normal formula would be :
+        *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
+        *
+        * We compute mult/shift to use instead :
+        *  time_in_ns = (len * mult) >> shift;
+        *
+        * We try to get the highest possible mult value for accuracy,
+        * but have to make sure no overflows will ever happen.
         */
-       if (r->rate_bps > 0) {
-               /*
-                * Higher shift gives better accuracy.  Find the largest
-                * shift such that mult fits in 32 bits.
-                */
-               for (shift = 0; shift < 16; shift++) {
-                       r->shift = shift;
-                       factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-                       mult = div64_u64(factor, r->rate_bps);
-                       if (mult > UINT_MAX)
+       if (r->rate_bytes_ps > 0) {
+               u64 factor = NSEC_PER_SEC;
+
+               for (;;) {
+                       r->mult = div64_u64(factor, r->rate_bytes_ps);
+                       if (r->mult & (1U << 31) || factor & (1ULL << 63))
                                break;
+                       factor <<= 1;
+                       r->shift++;
                }
-
-               r->shift = shift - 1;
-               factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
-               r->mult = div64_u64(factor, r->rate_bps);
        }
 }
 EXPORT_SYMBOL(psched_ratecfg_precompute);
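
The rewritten psched_ratecfg_precompute() above trades the per-packet 64-bit divide for a multiply-and-shift, exactly as its comment describes: grow the factor until mult is as large as safely fits, then compute the transfer time as (len * mult) >> shift. A stand-alone user-space sketch of that precomputation, using an illustrative 1 Gbit/s rate (125000000 bytes per second) rather than anything from the patch:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct sketch_ratecfg {
	uint64_t rate_bytes_ps;
	uint32_t mult;
	unsigned int shift;
};

static void sketch_precompute(struct sketch_ratecfg *r, uint64_t rate_bytes_ps)
{
	uint64_t factor = NSEC_PER_SEC;

	r->rate_bytes_ps = rate_bytes_ps;
	r->mult = 1;
	r->shift = 0;
	if (!rate_bytes_ps)
		return;
	/* Keep doubling factor (and shift) until mult would no longer
	 * fit safely in 32 bits, mirroring the loop above. */
	for (;;) {
		r->mult = factor / rate_bytes_ps;
		if (r->mult & (1U << 31) || factor & (1ULL << 63))
			break;
		factor <<= 1;
		r->shift++;
	}
}

int main(void)
{
	struct sketch_ratecfg r;
	uint64_t len = 1500;	/* bytes */

	sketch_precompute(&r, 125000000);	/* 1 Gbit/s */
	printf("approx %llu ns, exact %llu ns\n",
	       (unsigned long long)((len * r.mult) >> r.shift),
	       (unsigned long long)(len * NSEC_PER_SEC / r.rate_bytes_ps));
	return 0;
}

For a 1500-byte packet both expressions give 12000 ns, while the fast path needs only a multiply and a shift.
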
index 9facea03faeb89e6b51f54a2f4c01729979b8584..c4075610502cf3f53f4ac3f1bc1be7b096e7f488 100644 (file)
@@ -114,7 +114,7 @@ struct hfsc_class {
 
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
-       struct gnet_stats_rate_est rate_est;
+       struct gnet_stats_rate_est64 rate_est;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */
index 79b1876b6cd260fac7d150ceeaa7a5a647266a56..c2124ea29f4594304c3b7ecb02af411ed62fbebe 100644 (file)
@@ -65,6 +65,10 @@ static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis f
 module_param    (htb_hysteresis, int, 0640);
 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
 
+static int htb_rate_est = 0; /* htb classes have a default rate estimator */
+module_param(htb_rate_est, int, 0640);
+MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
+
 /* used internally to keep status of single class */
 enum htb_cmode {
        HTB_CANT_SEND,          /* class can't send and can't borrow */
@@ -72,95 +76,105 @@ enum htb_cmode {
        HTB_CAN_SEND            /* class can send */
 };
 
-/* interior & leaf nodes; props specific to leaves are marked L: */
+struct htb_prio {
+       union {
+               struct rb_root  row;
+               struct rb_root  feed;
+       };
+       struct rb_node  *ptr;
+       /* When class changes from state 1->2 and disconnects from
+        * parent's feed then we lost ptr value and start from the
+        * first child again. Here we store classid of the
+        * last valid ptr (used when ptr is NULL).
+        */
+       u32             last_ptr_id;
+};
+
+/* interior & leaf nodes; props specific to leaves are marked L:
+ * To reduce false sharing, place mostly read fields at beginning,
+ * and mostly written ones at the end.
+ */
 struct htb_class {
        struct Qdisc_class_common common;
-       /* general class parameters */
-       struct gnet_stats_basic_packed bstats;
-       struct gnet_stats_queue qstats;
-       struct gnet_stats_rate_est rate_est;
-       struct tc_htb_xstats xstats;    /* our special stats */
-       int refcnt;             /* usage count of this class */
+       struct psched_ratecfg   rate;
+       struct psched_ratecfg   ceil;
+       s64                     buffer, cbuffer;/* token bucket depth/rate */
+       s64                     mbuffer;        /* max wait time */
+       int                     prio;           /* these two are used only by leaves... */
+       int                     quantum;        /* but stored for parent-to-leaf return */
 
-       /* topology */
-       int level;              /* our level (see above) */
-       unsigned int children;
-       struct htb_class *parent;       /* parent class */
+       struct tcf_proto        *filter_list;   /* class attached filters */
+       int                     filter_cnt;
+       int                     refcnt;         /* usage count of this class */
 
-       int prio;               /* these two are used only by leaves... */
-       int quantum;            /* but stored for parent-to-leaf return */
+       int                     level;          /* our level (see above) */
+       unsigned int            children;
+       struct htb_class        *parent;        /* parent class */
+
+       struct gnet_stats_rate_est64 rate_est;
+
+       /*
+        * Written often fields
+        */
+       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_queue qstats;
+       struct tc_htb_xstats    xstats; /* our special stats */
+
+       /* token bucket parameters */
+       s64                     tokens, ctokens;/* current number of tokens */
+       s64                     t_c;            /* checkpoint time */
 
        union {
                struct htb_class_leaf {
-                       struct Qdisc *q;
-                       int deficit[TC_HTB_MAXDEPTH];
                        struct list_head drop_list;
+                       int             deficit[TC_HTB_MAXDEPTH];
+                       struct Qdisc    *q;
                } leaf;
                struct htb_class_inner {
-                       struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
-                       struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
-                       /* When class changes from state 1->2 and disconnects from
-                        * parent's feed then we lost ptr value and start from the
-                        * first child again. Here we store classid of the
-                        * last valid ptr (used when ptr is NULL).
-                        */
-                       u32 last_ptr_id[TC_HTB_NUMPRIO];
+                       struct htb_prio clprio[TC_HTB_NUMPRIO];
                } inner;
        } un;
-       struct rb_node node[TC_HTB_NUMPRIO];    /* node for self or feed tree */
-       struct rb_node pq_node; /* node for event queue */
-       psched_time_t pq_key;
+       s64                     pq_key;
 
-       int prio_activity;      /* for which prios are we active */
-       enum htb_cmode cmode;   /* current mode of the class */
-
-       /* class attached filters */
-       struct tcf_proto *filter_list;
-       int filter_cnt;
+       int                     prio_activity;  /* for which prios are we active */
+       enum htb_cmode          cmode;          /* current mode of the class */
+       struct rb_node          pq_node;        /* node for event queue */
+       struct rb_node          node[TC_HTB_NUMPRIO];   /* node for self or feed tree */
+};
 
-       /* token bucket parameters */
-       struct psched_ratecfg rate;
-       struct psched_ratecfg ceil;
-       s64 buffer, cbuffer;    /* token bucket depth/rate */
-       psched_tdiff_t mbuffer; /* max wait time */
-       s64 tokens, ctokens;    /* current number of tokens */
-       psched_time_t t_c;      /* checkpoint time */
+struct htb_level {
+       struct rb_root  wait_pq;
+       struct htb_prio hprio[TC_HTB_NUMPRIO];
 };
 
 struct htb_sched {
        struct Qdisc_class_hash clhash;
-       struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
-
-       /* self list - roots of self generating tree */
-       struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
-       int row_mask[TC_HTB_MAXDEPTH];
-       struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
-       u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+       int                     defcls;         /* class where unclassified flows go to */
+       int                     rate2quantum;   /* quant = rate / rate2quantum */
 
-       /* self wait list - roots of wait PQs per row */
-       struct rb_root wait_pq[TC_HTB_MAXDEPTH];
+       /* filters for qdisc itself */
+       struct tcf_proto        *filter_list;
 
-       /* time of nearest event per level (row) */
-       psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+#define HTB_WARN_TOOMANYEVENTS 0x1
+       unsigned int            warned; /* only one warning */
+       int                     direct_qlen;
+       struct work_struct      work;
 
-       int defcls;             /* class where unclassified flows go to */
+       /* non shaped skbs; let them go directly thru */
+       struct sk_buff_head     direct_queue;
+       long                    direct_pkts;
 
-       /* filters for qdisc itself */
-       struct tcf_proto *filter_list;
+       struct qdisc_watchdog   watchdog;
 
-       int rate2quantum;       /* quant = rate / rate2quantum */
-       psched_time_t now;      /* cached dequeue time */
-       struct qdisc_watchdog watchdog;
+       s64                     now;    /* cached dequeue time */
+       struct list_head        drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 
-       /* non shaped skbs; let them go directly thru */
-       struct sk_buff_head direct_queue;
-       int direct_qlen;        /* max qlen of above */
+       /* time of nearest event per level (row) */
+       s64                     near_ev_cache[TC_HTB_MAXDEPTH];
 
-       long direct_pkts;
+       int                     row_mask[TC_HTB_MAXDEPTH];
 
-#define HTB_WARN_TOOMANYEVENTS 0x1
-       unsigned int warned;    /* only one warning */
-       struct work_struct work;
+       struct htb_level        hlevel[TC_HTB_MAXDEPTH];
 };
 
 /* find class in global hash table using given handle */
@@ -276,7 +290,7 @@ static void htb_add_to_id_tree(struct rb_root *root,
 static void htb_add_to_wait_tree(struct htb_sched *q,
                                 struct htb_class *cl, s64 delay)
 {
-       struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
+       struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
 
        cl->pq_key = q->now + delay;
        if (cl->pq_key == q->now)
@@ -296,7 +310,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
-       rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
+       rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
 }
 
 /**
@@ -323,7 +337,7 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
-               htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
+               htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
        }
 }
 
@@ -349,16 +363,18 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,
                                                 struct htb_class *cl, int mask)
 {
        int m = 0;
+       struct htb_level *hlevel = &q->hlevel[cl->level];
 
        while (mask) {
                int prio = ffz(~mask);
+               struct htb_prio *hprio = &hlevel->hprio[prio];
 
                mask &= ~(1 << prio);
-               if (q->ptr[cl->level][prio] == cl->node + prio)
-                       htb_next_rb_node(q->ptr[cl->level] + prio);
+               if (hprio->ptr == cl->node + prio)
+                       htb_next_rb_node(&hprio->ptr);
 
-               htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
-               if (!q->row[cl->level][prio].rb_node)
+               htb_safe_rb_erase(cl->node + prio, &hprio->row);
+               if (!hprio->row.rb_node)
                        m |= 1 << prio;
        }
        q->row_mask[cl->level] &= ~m;
@@ -382,13 +398,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
                        int prio = ffz(~m);
                        m &= ~(1 << prio);
 
-                       if (p->un.inner.feed[prio].rb_node)
+                       if (p->un.inner.clprio[prio].feed.rb_node)
                                /* parent already has its feed in use so that
                                 * reset bit in mask as parent is already ok
                                 */
                                mask &= ~(1 << prio);
 
-                       htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
+                       htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
                }
                p->prio_activity |= mask;
                cl = p;
@@ -418,18 +434,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
                        int prio = ffz(~m);
                        m &= ~(1 << prio);
 
-                       if (p->un.inner.ptr[prio] == cl->node + prio) {
+                       if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
                                /* we are removing child which is pointed to from
                                 * parent feed - forget the pointer but remember
                                 * classid
                                 */
-                               p->un.inner.last_ptr_id[prio] = cl->common.classid;
-                               p->un.inner.ptr[prio] = NULL;
+                               p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
+                               p->un.inner.clprio[prio].ptr = NULL;
                        }
 
-                       htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
+                       htb_safe_rb_erase(cl->node + prio,
+                                         &p->un.inner.clprio[prio].feed);
 
-                       if (!p->un.inner.feed[prio].rb_node)
+                       if (!p->un.inner.clprio[prio].feed.rb_node)
                                mask |= 1 << prio;
                }
 
@@ -644,7 +661,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
-                               htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+                               htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }
@@ -664,18 +681,20 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq, q->now for too many events).
  * Note: Applied are events whose cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
-                                  unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, const int level,
+                        unsigned long start)
 {
        /* don't run for longer than 2 jiffies; 2 is used instead of
         * 1 to simplify things when jiffy is going to be incremented
         * too soon
         */
        unsigned long stop_at = start + 2;
+       struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
+
        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
                s64 diff;
-               struct rb_node *p = rb_first(&q->wait_pq[level]);
+               struct rb_node *p = rb_first(wait_pq);
 
                if (!p)
                        return 0;
@@ -684,7 +703,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
                if (cl->pq_key > q->now)
                        return cl->pq_key;
 
-               htb_safe_rb_erase(p, q->wait_pq + level);
+               htb_safe_rb_erase(p, wait_pq);
                diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
@@ -728,8 +747,7 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
  *
  * Find leaf where current feed pointers points to.
  */
-static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
-                                        struct rb_node **pptr, u32 * pid)
+static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 {
        int i;
        struct {
@@ -738,10 +756,10 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;
 
-       BUG_ON(!tree->rb_node);
-       sp->root = tree->rb_node;
-       sp->pptr = pptr;
-       sp->pid = pid;
+       BUG_ON(!hprio->row.rb_node);
+       sp->root = hprio->row.rb_node;
+       sp->pptr = &hprio->ptr;
+       sp->pid = &hprio->last_ptr_id;
 
        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
@@ -768,12 +786,15 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                        }
                } else {
                        struct htb_class *cl;
+                       struct htb_prio *clp;
+
                        cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
                        if (!cl->level)
                                return cl;
-                       (++sp)->root = cl->un.inner.feed[prio].rb_node;
-                       sp->pptr = cl->un.inner.ptr + prio;
-                       sp->pid = cl->un.inner.last_ptr_id + prio;
+                       clp = &cl->un.inner.clprio[prio];
+                       (++sp)->root = clp->feed.rb_node;
+                       sp->pptr = &clp->ptr;
+                       sp->pid = &clp->last_ptr_id;
                }
        }
        WARN_ON(1);
@@ -783,15 +804,16 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 /* dequeues packet at given priority and level; call only if
  * you are sure that there is active class at prio/level
  */
-static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
-                                       int level)
+static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
+                                       const int level)
 {
        struct sk_buff *skb = NULL;
        struct htb_class *cl, *start;
+       struct htb_level *hlevel = &q->hlevel[level];
+       struct htb_prio *hprio = &hlevel->hprio[prio];
+
        /* look initial class up in the row */
-       start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
-                                    q->ptr[level] + prio,
-                                    q->last_ptr_id[level] + prio);
+       start = cl = htb_lookup_leaf(hprio, prio);
 
        do {
 next:
@@ -811,9 +833,7 @@ next:
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;
 
-                       next = htb_lookup_leaf(q->row[level] + prio,
-                                              prio, q->ptr[level] + prio,
-                                              q->last_ptr_id[level] + prio);
+                       next = htb_lookup_leaf(hprio, prio);
 
                        if (cl == start)        /* fix start if we just deleted it */
                                start = next;
@@ -826,11 +846,9 @@ next:
                        break;
 
                qdisc_warn_nonwc("htb", cl->un.leaf.q);
-               htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
-                                 ptr[0]) + prio);
-               cl = htb_lookup_leaf(q->row[level] + prio, prio,
-                                    q->ptr[level] + prio,
-                                    q->last_ptr_id[level] + prio);
+               htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
+                                        &q->hlevel[0].hprio[prio].ptr);
+               cl = htb_lookup_leaf(hprio, prio);
 
        } while (cl != start);
 
@@ -839,8 +857,8 @@ next:
                cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
                if (cl->un.leaf.deficit[level] < 0) {
                        cl->un.leaf.deficit[level] += cl->quantum;
-                       htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
-                                         ptr[0]) + prio);
+                       htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+                                                &q->hlevel[0].hprio[prio].ptr);
                }
                /* this used to be after charge_class but this constellation
                 * gives us slightly better performance
@@ -857,7 +875,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
        struct sk_buff *skb;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
-       psched_time_t next_event;
+       s64 next_event;
        unsigned long start_at;
 
        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,15 +898,14 @@ ok:
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
-               psched_time_t event;
+               s64 event = q->near_ev_cache[level];
 
-               if (q->now >= q->near_ev_cache[level]) {
+               if (q->now >= event) {
                        event = htb_do_events(q, level, start_at);
                        if (!event)
                                event = q->now + NSEC_PER_SEC;
                        q->near_ev_cache[level] = event;
-               } else
-                       event = q->near_ev_cache[level];
+               }
 
                if (next_event > event)
                        next_event = event;
@@ -968,10 +985,8 @@ static void htb_reset(struct Qdisc *sch)
        qdisc_watchdog_cancel(&q->watchdog);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
-       memset(q->row, 0, sizeof(q->row));
+       memset(q->hlevel, 0, sizeof(q->hlevel));
        memset(q->row_mask, 0, sizeof(q->row_mask));
-       memset(q->wait_pq, 0, sizeof(q->wait_pq));
-       memset(q->ptr, 0, sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
 }
@@ -1090,9 +1105,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 
        memset(&opt, 0, sizeof(opt));
 
-       opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
+       psched_ratecfg_getrate(&opt.rate, &cl->rate);
        opt.buffer = PSCHED_NS2TICKS(cl->buffer);
-       opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
+       psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
        opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
@@ -1117,8 +1132,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
        if (!cl->level && cl->un.leaf.q)
                cl->qstats.qlen = cl->un.leaf.q->q.qlen;
-       cl->xstats.tokens = cl->tokens;
-       cl->xstats.ctokens = cl->ctokens;
+       cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+       cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -1192,7 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
        WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
 
        if (parent->cmode != HTB_CAN_SEND)
-               htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
+               htb_safe_rb_erase(&parent->pq_node,
+                                 &q->hlevel[parent->level].wait_pq);
 
        parent->level = 0;
        memset(&parent->un.inner, 0, sizeof(parent->un.inner));
@@ -1200,7 +1216,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
        parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
        parent->tokens = parent->buffer;
        parent->ctokens = parent->cbuffer;
-       parent->t_c = psched_get_time();
+       parent->t_c = ktime_to_ns(ktime_get());
        parent->cmode = HTB_CAN_SEND;
 }
 
@@ -1281,7 +1297,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
                htb_deactivate(q, cl);
 
        if (cl->cmode != HTB_CAN_SEND)
-               htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+               htb_safe_rb_erase(&cl->pq_node,
+                                 &q->hlevel[cl->level].wait_pq);
 
        if (last_child)
                htb_parent_to_leaf(q, cl, new_q);
@@ -1366,12 +1383,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                if (!cl)
                        goto failure;
 
-               err = gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                       qdisc_root_sleeping_lock(sch),
-                                       tca[TCA_RATE] ? : &est.nla);
-               if (err) {
-                       kfree(cl);
-                       goto failure;
+               if (htb_rate_est || tca[TCA_RATE]) {
+                       err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+                                               qdisc_root_sleeping_lock(sch),
+                                               tca[TCA_RATE] ? : &est.nla);
+                       if (err) {
+                               kfree(cl);
+                               goto failure;
+                       }
                }
 
                cl->refcnt = 1;
@@ -1401,7 +1420,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
                        /* remove from evt list because of level change */
                        if (parent->cmode != HTB_CAN_SEND) {
-                               htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
+                               htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
                                parent->cmode = HTB_CAN_SEND;
                        }
                        parent->level = (parent->parent ? parent->parent->level
@@ -1417,8 +1436,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                /* set class to be in HTB_CAN_SEND state */
                cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
                cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
-               cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
-               cl->t_c = psched_get_time();
+               cl->mbuffer = 60ULL * NSEC_PER_SEC;     /* 1min */
+               cl->t_c = ktime_to_ns(ktime_get());
                cl->cmode = HTB_CAN_SEND;
 
                /* attach to the hash list and parent's family */
@@ -1459,8 +1478,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        cl->prio = TC_HTB_NUMPRIO - 1;
        }
 
-       psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
-       psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
+       psched_ratecfg_precompute(&cl->rate, &hopt->rate);
+       psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
 
        cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
        cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
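
The HTB conversion above keeps tokens, buffers and timestamps in nanoseconds internally and only translates to and from legacy psched ticks (64 ns each) at the netlink boundary, via the PSCHED_TICKS2NS()/PSCHED_NS2TICKS() calls. A user-space sketch of what those macros do; the constants are copied from include/net/pkt_sched.h as an assumption and the buffer value is just an example:

#include <stdint.h>
#include <stdio.h>

#define PSCHED_SHIFT		6	/* 1 psched tick == 64 ns */
#define PSCHED_TICKS2NS(x)	((int64_t)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)	((int64_t)(x) >> PSCHED_SHIFT)

int main(void)
{
	int64_t buffer_ticks = 1600;		/* e.g. hopt->buffer from tc */
	int64_t buffer_ns = PSCHED_TICKS2NS(buffer_ticks);

	printf("%lld ticks = %lld ns = %lld ticks again\n",
	       (long long)buffer_ticks, (long long)buffer_ns,
	       (long long)PSCHED_NS2TICKS(buffer_ns));
	return 0;
}
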
index d51852bba01c981c9f9834dad82cfbcfec904508..7c195d972bf0cd437a8b94a6bc80564c1585b82b 100644 (file)
@@ -138,7 +138,7 @@ struct qfq_class {
 
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
-       struct gnet_stats_rate_est rate_est;
+       struct gnet_stats_rate_est64 rate_est;
        struct Qdisc *qdisc;
        struct list_head alist;         /* Link for active-classes list. */
        struct qfq_aggregate *agg;      /* Parent aggregate. */
index c8388f3c3426ab862414d1ce67e5ec3e9d13ecde..1aaf1b6e51a2be238bc47797597ed3bcb76de4b1 100644 (file)
@@ -116,14 +116,57 @@ struct tbf_sched_data {
        struct qdisc_watchdog watchdog; /* Watchdog timer */
 };
 
+
+/* GSO packet is too big, segment it so that tbf can transmit
+ * each segment in time
+ */
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct tbf_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *segs, *nskb;
+       netdev_features_t features = netif_skb_features(skb);
+       int ret, nb;
+
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+       if (IS_ERR_OR_NULL(segs))
+               return qdisc_reshape_fail(skb, sch);
+
+       nb = 0;
+       while (segs) {
+               nskb = segs->next;
+               segs->next = NULL;
+               if (likely(segs->len <= q->max_size)) {
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       ret = qdisc_enqueue(segs, q->qdisc);
+               } else {
+                       ret = qdisc_reshape_fail(skb, sch);
+               }
+               if (ret != NET_XMIT_SUCCESS) {
+                       if (net_xmit_drop_count(ret))
+                               sch->qstats.drops++;
+               } else {
+                       nb++;
+               }
+               segs = nskb;
+       }
+       sch->q.qlen += nb;
+       if (nb > 1)
+               qdisc_tree_decrease_qlen(sch, 1 - nb);
+       consume_skb(skb);
+       return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        int ret;
 
-       if (qdisc_pkt_len(skb) > q->max_size)
+       if (qdisc_pkt_len(skb) > q->max_size) {
+               if (skb_is_gso(skb))
+                       return tbf_segment(skb, sch);
                return qdisc_reshape_fail(skb, sch);
-
+       }
        ret = qdisc_enqueue(skb, q->qdisc);
        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
@@ -298,9 +341,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
 
-       psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
+       psched_ratecfg_precompute(&q->rate, &rtab->rate);
        if (ptab) {
-               psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
+               psched_ratecfg_precompute(&q->peak, &ptab->rate);
                q->peak_present = true;
        } else {
                q->peak_present = false;
@@ -350,9 +393,9 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        opt.limit = q->limit;
-       opt.rate.rate = psched_ratecfg_getrate(&q->rate);
+       psched_ratecfg_getrate(&opt.rate, &q->rate);
        if (q->peak_present)
-               opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
+               psched_ratecfg_getrate(&opt.peakrate, &q->peak);
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        opt.mtu = PSCHED_NS2TICKS(q->mtu);
index 91cfd8f94a19e4c6cecd716fda03027007158816..9a383a8774e80104c674d530349a3d2a044fb2c0 100644 (file)
@@ -86,10 +86,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        /* Discarding const is appropriate here.  */
        asoc->ep = (struct sctp_endpoint *)ep;
-       sctp_endpoint_hold(asoc->ep);
-
-       /* Hold the sock.  */
        asoc->base.sk = (struct sock *)sk;
+
+       sctp_endpoint_hold(asoc->ep);
        sock_hold(asoc->base.sk);
 
        /* Initialize the common base substructure.  */
@@ -103,13 +102,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
 
        asoc->state = SCTP_STATE_CLOSED;
-
-       /* Set these values from the socket values, a conversion between
-        * millsecons to seconds/microseconds must also be done.
-        */
-       asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
-       asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
-                                       * 1000;
+       asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
        asoc->frag_point = 0;
        asoc->user_frag = sp->user_frag;
 
@@ -343,8 +336,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        return asoc;
 
 fail_init:
-       sctp_endpoint_put(asoc->ep);
        sock_put(asoc->base.sk);
+       sctp_endpoint_put(asoc->ep);
        return NULL;
 }
 
@@ -356,7 +349,7 @@ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
 {
        struct sctp_association *asoc;
 
-       asoc = t_new(struct sctp_association, gfp);
+       asoc = kzalloc(sizeof(*asoc), gfp);
        if (!asoc)
                goto fail;
 
index 41145fe31813bc70907a9bdf9bba92886246b5b6..64977ea0f9c55e02988377ef6e2de73693607dea 100644 (file)
@@ -162,7 +162,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
        struct sctp_sockaddr_entry *addr;
 
        /* Add the address to the bind address list.  */
-       addr = t_new(struct sctp_sockaddr_entry, gfp);
+       addr = kzalloc(sizeof(*addr), gfp);
        if (!addr)
                return -ENOMEM;
 
index 69ce21e3716f89fbfc78b050ffc3943455906d27..7135fc0c087a09c972a9d60693d937e0dadf1f7b 100644 (file)
@@ -66,7 +66,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
 }
 
 /* Allocate and initialize datamsg. */
-SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
+static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
 {
        struct sctp_datamsg *msg;
        msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
index 5fbd7bc6bb11077f8af91bf01ea8403c5087c31e..b26999d508ba636e9bc0e98bec8d5732d0fac773 100644 (file)
@@ -192,9 +192,10 @@ struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
        struct sctp_endpoint *ep;
 
        /* Build a local endpoint. */
-       ep = t_new(struct sctp_endpoint, gfp);
+       ep = kzalloc(sizeof(*ep), gfp);
        if (!ep)
                goto fail;
+
        if (!sctp_endpoint_init(ep, sk, gfp))
                goto fail_init;
 
@@ -246,10 +247,9 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 /* Final destructor for endpoint.  */
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
-       SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
+       struct sock *sk;
 
-       /* Free up the HMAC transform. */
-       crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
+       SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
 
        /* Free the digest buffer */
        kfree(ep->digest);
@@ -270,13 +270,15 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 
        memset(ep->secret_key, 0, sizeof(ep->secret_key));
 
-       /* Remove and free the port */
-       if (sctp_sk(ep->base.sk)->bind_hash)
-               sctp_put_port(ep->base.sk);
-
        /* Give up our hold on the sock. */
-       if (ep->base.sk)
-               sock_put(ep->base.sk);
+       sk = ep->base.sk;
+       if (sk != NULL) {
+               /* Remove and free the port */
+               if (sctp_sk(sk)->bind_hash)
+                       sctp_put_port(sk);
+
+               sock_put(sk);
+       }
 
        kfree(ep);
        SCTP_DBG_OBJCNT_DEC(ep);
index 4b2c83146aa7e5c47b46b8148d701a4fc3d2189e..4cfc74699a3ff75d3daedbb24bf27b6e3d11f93c 100644 (file)
@@ -589,7 +589,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        struct sctp_association *asoc = NULL;
        struct sctp_transport *transport;
        struct inet_sock *inet;
-       sk_buff_data_t saveip, savesctp;
+       __u16 saveip, savesctp;
        int err;
        struct net *net = dev_net(skb->dev);
 
@@ -903,11 +903,11 @@ hit:
 }
 
 /* Look up an association. BH-safe. */
-SCTP_STATIC
+static
 struct sctp_association *sctp_lookup_association(struct net *net,
                                                 const union sctp_addr *laddr,
                                                 const union sctp_addr *paddr,
-                                           struct sctp_transport **transportp)
+                                                struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
index 391a245d520316c865aad51424b94b9fe6fb8bf7..adeaa0e64f52f494b13cd4f6c88d5cfd0c55f3b3 100644 (file)
@@ -145,15 +145,15 @@ static struct notifier_block sctp_inet6addr_notifier = {
 };
 
 /* ICMP error handler. */
-SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-                            u8 type, u8 code, int offset, __be32 info)
+static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                       u8 type, u8 code, int offset, __be32 info)
 {
        struct inet6_dev *idev;
        struct sock *sk;
        struct sctp_association *asoc;
        struct sctp_transport *transport;
        struct ipv6_pinfo *np;
-       sk_buff_data_t saveip, savesctp;
+       __u16 saveip, savesctp;
        int err;
        struct net *net = dev_net(skb->dev);
 
@@ -402,7 +402,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                /* Add the address to the local list.  */
-               addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
+               addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
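
Several hunks in this series replace the SCTP-private t_new() allocator with a plain kzalloc(). As a rough sketch (the historical macro definition is not shown in this diff, so treat the exact shape as an assumption), t_new() was a typed wrapper around a zeroing allocation, which is why the open-coded form is a drop-in replacement:

#include <linux/slab.h>

/* Assumed historical shape of the helper being removed. */
#define t_new_example(type, gfp)	((type *)kzalloc(sizeof(type), (gfp)))

/* Equivalent open-coded call used after this series:
 *	addr = kzalloc(sizeof(*addr), gfp);
 * Both return zero-filled memory of the same size.
 */
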
index 32a4625fef7798f782859f3e9f6bda2979235dd9..be35e2dbcc9aed6dc12fd5c2abc02d584d1d8bc5 100644 (file)
@@ -206,6 +206,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
  */
 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 {
+       memset(q, 0, sizeof(struct sctp_outq));
+
        q->asoc = asoc;
        INIT_LIST_HEAD(&q->out_chunk_list);
        INIT_LIST_HEAD(&q->control_chunk_list);
@@ -213,11 +215,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);
 
-       q->fast_rtx = 0;
-       q->outstanding_bytes = 0;
        q->empty = 1;
-       q->cork  = 0;
-       q->out_qlen = 0;
 }
 
 /* Free the outqueue structure and any related pending chunks.
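
sctp_outq_init() above now zeroes the whole structure up front, so the explicit `= 0` assignments can go away and only list heads and non-zero defaults need touching. A sketch of the same pattern with a hypothetical structure (nothing below is taken from the file beyond the memset/INIT_LIST_HEAD idiom):

#include <linux/list.h>
#include <linux/string.h>

struct example_outq {
	struct list_head out_chunk_list;
	unsigned int out_qlen;	/* fine at zero after the memset() */
	char empty;
};

static void example_outq_init(struct example_outq *q)
{
	memset(q, 0, sizeof(*q));		/* clears every counter and flag */
	INIT_LIST_HEAD(&q->out_chunk_list);	/* list heads still need real init */
	q->empty = 1;				/* only non-zero defaults remain */
}
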
index 4e45ee35d0db149582c4435462baf253647d7774..62526c4770505e0f741123bd009e91a90781f587 100644 (file)
@@ -134,9 +134,15 @@ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_commo
        struct sctp_af *af;
 
        if (epb->type == SCTP_EP_TYPE_ASSOCIATION) {
-           asoc = sctp_assoc(epb);
-           peer = asoc->peer.primary_path;
-           primary = &peer->saddr;
+               asoc = sctp_assoc(epb);
+
+               peer = asoc->peer.primary_path;
+               if (unlikely(peer == NULL)) {
+                       WARN(1, "Association %p with NULL primary path!\n", asoc);
+                       return;
+               }
+
+               primary = &peer->saddr;
        }
 
        rcu_read_lock();
index eaee00c61139b974572e8c4aed425176f3ae2894..1de49c802d835be68344a8ab67e41a8e4b94346d 100644 (file)
@@ -153,7 +153,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 
        for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
                /* Add the address to the local list.  */
-               addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
+               addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
                if (addr) {
                        addr->a.v4.sin_family = AF_INET;
                        addr->a.v4.sin_port = 0;
@@ -178,7 +178,7 @@ static void sctp_get_local_addr_list(struct net *net)
 
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
-               __list_for_each(pos, &sctp_address_families) {
+               list_for_each(pos, &sctp_address_families) {
                        af = list_entry(pos, struct sctp_af, list);
                        af->copy_addrlist(&net->sctp.local_addr_list, dev);
                }
@@ -1312,7 +1312,7 @@ static struct pernet_operations sctp_net_ops = {
 };
 
 /* Initialize the universe into something sensible.  */
-SCTP_STATIC __init int sctp_init(void)
+static __init int sctp_init(void)
 {
        int i;
        int status = -EINVAL;
@@ -1499,7 +1499,7 @@ err_chunk_cachep:
 }
 
 /* Exit handler for the SCTP protocol.  */
-SCTP_STATIC __exit void sctp_exit(void)
+static __exit void sctp_exit(void)
 {
        /* BUG.  This should probably do something useful like clean
         * up all the remaining associations and all that memory.
index cf579e71cff0652cd2fea2d7aae8c8343ecc66f7..dd71f1f9ba10f40b1bf16efb809acd62efffd1d0 100644 (file)
@@ -68,9 +68,8 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-SCTP_STATIC
-struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
-                                  __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
+                                         __u8 type, __u8 flags, int paylen);
 static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const struct sctp_chunk *init_chunk,
@@ -1353,9 +1352,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
 /* Create a new chunk, setting the type and flags headers from the
  * arguments, reserving enough space for a 'paylen' byte payload.
  */
-SCTP_STATIC
-struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
-                                  __u8 type, __u8 flags, int paylen)
+static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
+                                         __u8 type, __u8 flags, int paylen)
 {
        struct sctp_chunk *retval;
        sctp_chunkhdr_t *chunk_hdr;
@@ -1632,8 +1630,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
        cookie->c.adaptation_ind = asoc->peer.adaptation_ind;
 
        /* Set an expiration time for the cookie.  */
-       do_gettimeofday(&cookie->c.expiration);
-       TIMEVAL_ADD(asoc->cookie_life, cookie->c.expiration);
+       cookie->c.expiration = ktime_add(asoc->cookie_life,
+                                        ktime_get());
 
        /* Copy the peer's init packet.  */
        memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
@@ -1682,7 +1680,7 @@ struct sctp_association *sctp_unpack_cookie(
        unsigned int len;
        sctp_scope_t scope;
        struct sk_buff *skb = chunk->skb;
-       struct timeval tv;
+       ktime_t kt;
        struct hash_desc desc;
 
        /* Header size is static data prior to the actual cookie, including
@@ -1759,11 +1757,11 @@ no_hmac:
         * down the new association establishment instead of every packet.
         */
        if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
-               skb_get_timestamp(skb, &tv);
+               kt = skb_get_ktime(skb);
        else
-               do_gettimeofday(&tv);
+               kt = ktime_get();
 
-       if (!asoc && tv_lt(bear_cookie->expiration, tv)) {
+       if (!asoc && ktime_compare(bear_cookie->expiration, kt) < 0) {
                /*
                 * Section 3.3.10.3 Stale Cookie Error (3)
                 *
@@ -1775,9 +1773,7 @@ no_hmac:
                len = ntohs(chunk->chunk_hdr->length);
                *errp = sctp_make_op_error_space(asoc, chunk, len);
                if (*errp) {
-                       suseconds_t usecs = (tv.tv_sec -
-                               bear_cookie->expiration.tv_sec) * 1000000L +
-                               tv.tv_usec - bear_cookie->expiration.tv_usec;
+                       suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration));
                        __be32 n = htonl(usecs);
 
                        sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
@@ -2516,8 +2512,7 @@ do_addr_param:
                /* Suggested Cookie Life span increment's unit is msec,
                 * (1/1000sec).
                 */
-               asoc->cookie_life.tv_sec += stale / 1000;
-               asoc->cookie_life.tv_usec += (stale % 1000) * 1000;
+               asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale);
                break;
 
        case SCTP_PARAM_HOST_NAME_ADDRESS:
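
The cookie-lifetime hunks in this file and in the earlier association/socket changes replace the open-coded struct timeval arithmetic with ktime_t helpers: ms_to_ktime()/ktime_to_ms() for the socket-option conversion, ktime_add()/ktime_add_ms() to stamp a deadline, and ktime_compare() for the staleness test. A compact sketch of that lifecycle follows; the function names are hypothetical, only the ktime helpers themselves are taken from the diff.

#include <linux/types.h>
#include <linux/ktime.h>

/* Store a lifetime given in milliseconds (e.g. from a sockopt). */
static ktime_t example_lifetime_from_ms(u32 ms)
{
	return ms_to_ktime(ms);
}

/* Deadline = "now" plus the configured lifetime. */
static ktime_t example_cookie_deadline(ktime_t lifetime)
{
	return ktime_add(lifetime, ktime_get());
}

/* A cookie is stale once its expiration lies before the observed time. */
static bool example_cookie_is_stale(ktime_t expiration, ktime_t now)
{
	return ktime_compare(expiration, now) < 0;
}
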
index 8aab894aeabeecb410c9763e33824f5dec7a6c91..ff91f47b0239e98d966e877dd74b3546c294f536 100644 (file)
@@ -864,6 +864,7 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
            (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
                return;
 
+       BUG_ON(asoc->peer.primary_path == NULL);
        sctp_unhash_established(asoc);
        sctp_association_free(asoc);
 }
@@ -1274,8 +1275,10 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                                sctp_outq_uncork(&asoc->outqueue);
                                local_cork = 0;
                        }
-                       asoc = cmd->obj.asoc;
+
                        /* Register with the endpoint.  */
+                       asoc = cmd->obj.asoc;
+                       BUG_ON(asoc->peer.primary_path == NULL);
                        sctp_endpoint_add_asoc(ep, asoc);
                        sctp_hash_established(asoc);
                        break;
index de1a0138317f482c028ce583c335501b14d9f917..b3d186856513235d7f3f8fdd580e6ffaa6275127 100644 (file)
@@ -4632,16 +4632,16 @@ sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
        if (!repl)
                goto nomem;
 
+       /* Choose transport for INIT. */
+       sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
+                       SCTP_CHUNK(repl));
+
        /* Cast away the const modifier, as we want to just
         * rerun it through as a sideffect.
         */
        my_asoc = (struct sctp_association *)asoc;
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
 
-       /* Choose transport for INIT. */
-       sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
-                       SCTP_CHUNK(repl));
-
        /* After sending the INIT, "A" starts the T1-init timer and
         * enters the COOKIE-WAIT state.
         */
index f631c5ff4dbfd84426d140920b15650972f34646..66fcdcfe1b7441a135f2eb95cf5b27ab1de6d054 100644 (file)
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-/* WARNING:  Please do not remove the SCTP_STATIC attribute to
- * any of the functions below as they are used to export functions
- * used by a project regression testsuite.
- */
-
 /* Forward declarations for internal helper functions. */
 static int sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
@@ -98,6 +93,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
 static void sctp_wait_for_close(struct sock *sk, long timeo);
+static void sctp_destruct_sock(struct sock *sk);
 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
                                        union sctp_addr *addr, int len);
 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
@@ -279,7 +275,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
  *             sockaddr_in6 [RFC 2553]),
  *   addr_len - the size of the address structure.
  */
-SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
+static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
 {
        int retval = 0;
 
@@ -333,7 +329,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
 }
 
 /* Bind a local address either to an endpoint or to an association.  */
-SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 {
        struct net *net = sock_net(sk);
        struct sctp_sock *sp = sctp_sk(sk);
@@ -964,9 +960,9 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
  *
  * Returns 0 if ok, <0 errno code on error.
  */
-SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
-                                     struct sockaddr __user *addrs,
-                                     int addrs_size, int op)
+static int sctp_setsockopt_bindx(struct sock* sk,
+                                struct sockaddr __user *addrs,
+                                int addrs_size, int op)
 {
        struct sockaddr *kaddrs;
        int err;
@@ -1312,7 +1308,7 @@ out_free:
  *
  * Returns >=0 if ok, <0 errno code on error.
  */
-SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
+static int __sctp_setsockopt_connectx(struct sock* sk,
                                      struct sockaddr __user *addrs,
                                      int addrs_size,
                                      sctp_assoc_t *assoc_id)
@@ -1350,9 +1346,9 @@ SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
  * This is an older interface.  It's kept for backward compatibility
  * to the option that doesn't provide association id.
  */
-SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
-                                     struct sockaddr __user *addrs,
-                                     int addrs_size)
+static int sctp_setsockopt_connectx_old(struct sock* sk,
+                                       struct sockaddr __user *addrs,
+                                       int addrs_size)
 {
        return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
 }
@@ -1363,9 +1359,9 @@ SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
  * indication to the call.  Error is always negative and association id is
  * always positive.
  */
-SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
-                                     struct sockaddr __user *addrs,
-                                     int addrs_size)
+static int sctp_setsockopt_connectx(struct sock* sk,
+                                   struct sockaddr __user *addrs,
+                                   int addrs_size)
 {
        sctp_assoc_t assoc_id = 0;
        int err = 0;
@@ -1386,9 +1382,9 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
  * addrs_num structure member.  That way we can re-use the existing
  * code.
  */
-SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
-                                       char __user *optval,
-                                       int __user *optlen)
+static int sctp_getsockopt_connectx3(struct sock* sk, int len,
+                                    char __user *optval,
+                                    int __user *optlen)
 {
        struct sctp_getaddrs_old param;
        sctp_assoc_t assoc_id = 0;
@@ -1464,7 +1460,7 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
  * shutdown phase does not finish during this period, close() will
  * return but the graceful shutdown phase continues in the system.
  */
-SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
+static void sctp_close(struct sock *sk, long timeout)
 {
        struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
@@ -1573,10 +1569,10 @@ static int sctp_error(struct sock *sk, int flags, int err)
  */
 /* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */
 
-SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
+static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
 
-SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                            struct msghdr *msg, size_t msg_len)
+static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
+                       struct msghdr *msg, size_t msg_len)
 {
        struct net *net = sock_net(sk);
        struct sctp_sock *sp;
@@ -2034,9 +2030,9 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
  */
 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
-SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
-                            struct msghdr *msg, size_t len, int noblock,
-                            int flags, int *addr_len)
+static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
+                       struct msghdr *msg, size_t len, int noblock,
+                       int flags, int *addr_len)
 {
        struct sctp_ulpevent *event = NULL;
        struct sctp_sock *sp = sctp_sk(sk);
@@ -2915,13 +2911,8 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig
                        asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
                }
 
-               if (assocparams.sasoc_cookie_life != 0) {
-                       asoc->cookie_life.tv_sec =
-                                       assocparams.sasoc_cookie_life / 1000;
-                       asoc->cookie_life.tv_usec =
-                                       (assocparams.sasoc_cookie_life % 1000)
-                                       * 1000;
-               }
+               if (assocparams.sasoc_cookie_life != 0)
+                       asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
        } else {
                /* Set the values to the endpoint */
                struct sctp_sock *sp = sctp_sk(sk);
@@ -3565,8 +3556,8 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
  *   optval  - the buffer to store the value of the option.
  *   optlen  - the size of the buffer.
  */
-SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+static int sctp_setsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, unsigned int optlen)
 {
        int retval = 0;
 
@@ -3725,8 +3716,8 @@ out_nounlock:
  *
  * len: the size of the address.
  */
-SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
-                            int addr_len)
+static int sctp_connect(struct sock *sk, struct sockaddr *addr,
+                       int addr_len)
 {
        int err = 0;
        struct sctp_af *af;
@@ -3752,7 +3743,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
 }
 
 /* FIXME: Write comments. */
-SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
+static int sctp_disconnect(struct sock *sk, int flags)
 {
        return -EOPNOTSUPP; /* STUB */
 }
@@ -3764,7 +3755,7 @@ SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
  * descriptor will be returned from accept() to represent the newly
  * formed association.
  */
-SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
 {
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
@@ -3817,7 +3808,7 @@ out:
 }
 
 /* The SCTP ioctl handler. */
-SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
        int rc = -ENOTCONN;
 
@@ -3859,10 +3850,9 @@ out:
  * initialized the SCTP-specific portion of the sock.
  * The sock structure should already be zero-filled memory.
  */
-SCTP_STATIC int sctp_init_sock(struct sock *sk)
+static int sctp_init_sock(struct sock *sk)
 {
        struct net *net = sock_net(sk);
-       struct sctp_endpoint *ep;
        struct sctp_sock *sp;
 
        SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
@@ -3971,13 +3961,14 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         * change the data structure relationships, this may still
         * be useful for storing pre-connect address information.
         */
-       ep = sctp_endpoint_new(sk, GFP_KERNEL);
-       if (!ep)
+       sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
+       if (!sp->ep)
                return -ENOMEM;
 
-       sp->ep = ep;
        sp->hmac = NULL;
 
+       sk->sk_destruct = sctp_destruct_sock;
+
        SCTP_DBG_OBJCNT_INC(sock);
 
        local_bh_disable();
@@ -3995,7 +3986,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 }
 
 /* Cleanup any SCTP per socket resources.  */
-SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
+static void sctp_destroy_sock(struct sock *sk)
 {
        struct sctp_sock *sp;
 
@@ -4003,6 +3994,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 
        /* Release our hold on the endpoint. */
        sp = sctp_sk(sk);
+       /* This could happen during socket init, thus we bail out
+        * early, since the rest of the below is not setup either.
+        */
+       if (sp->ep == NULL)
+               return;
+
        if (sp->do_auto_asconf) {
                sp->do_auto_asconf = 0;
                list_del(&sp->auto_asconf_list);
@@ -4014,6 +4011,17 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
        local_bh_enable();
 }
 
+/* Triggered when there are no references on the socket anymore */
+static void sctp_destruct_sock(struct sock *sk)
+{
+       struct sctp_sock *sp = sctp_sk(sk);
+
+       /* Free up the HMAC transform. */
+       crypto_free_hash(sp->hmac);
+
+       inet_sock_destruct(sk);
+}
+
 /* API 4.1.7 shutdown() - TCP Style Syntax
  *     int shutdown(int socket, int how);
  *
@@ -4030,7 +4038,7 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
  *                     Disables further send  and  receive  operations
  *                     and initiates the SCTP shutdown sequence.
  */
-SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
+static void sctp_shutdown(struct sock *sk, int how)
 {
        struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
@@ -5075,10 +5083,7 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len,
                assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
                assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
                assocparams.sasoc_local_rwnd = asoc->a_rwnd;
-               assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec
-                                               * 1000) +
-                                               (asoc->cookie_life.tv_usec
-                                               / 1000);
+               assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
 
                list_for_each(pos, &asoc->peer.transport_addr_list) {
                        cnt ++;
@@ -5702,8 +5707,8 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        return 0;
 }
 
-SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
-                               char __user *optval, int __user *optlen)
+static int sctp_getsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, int __user *optlen)
 {
        int retval = 0;
        int len;
@@ -6031,7 +6036,6 @@ fail:
  */
 static int sctp_get_port(struct sock *sk, unsigned short snum)
 {
-       long ret;
        union sctp_addr addr;
        struct sctp_af *af = sctp_sk(sk)->pf->af;
 
@@ -6040,15 +6044,13 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
        addr.v4.sin_port = htons(snum);
 
        /* Note: sk->sk_num gets filled in if ephemeral port request. */
-       ret = sctp_get_port_local(sk, &addr);
-
-       return ret ? 1 : 0;
+       return !!sctp_get_port_local(sk, &addr);
 }
 
 /*
  *  Move a socket to LISTENING state.
  */
-SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
+static int sctp_listen_start(struct sock *sk, int backlog)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_endpoint *ep = sp->ep;
@@ -6335,8 +6337,7 @@ static int sctp_autobind(struct sock *sk)
  * msg_control
  * points here
  */
-SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
-                                 sctp_cmsgs_t *cmsgs)
+static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 {
        struct cmsghdr *cmsg;
        struct msghdr *my_msg = (struct msghdr *)msg;
@@ -6858,7 +6859,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newsk->sk_reuse = sk->sk_reuse;
 
        newsk->sk_shutdown = sk->sk_shutdown;
-       newsk->sk_destruct = inet_sock_destruct;
+       newsk->sk_destruct = sctp_destruct_sock;
        newsk->sk_family = sk->sk_family;
        newsk->sk_protocol = IPPROTO_SCTP;
        newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
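
The socket.c changes above move the HMAC-transform teardown out of the endpoint destructor and into a new sk_destruct callback that runs once the last reference to the sock is gone, chaining to the generic inet destructor afterwards. A hedged sketch of that chaining pattern: the protocol-private free is a placeholder, the header location for inet_sock_destruct() is an assumption, and only sk_destruct and inet_sock_destruct() themselves are taken from the diff.

#include <net/sock.h>
#include <net/inet_sock.h>	/* inet_sock_destruct() declaration (assumed location) */

static void example_destruct_sock(struct sock *sk)
{
	/* Free protocol-private state that must survive until the last ref,
	 * e.g. crypto transforms as in sctp_destruct_sock() above. */

	inet_sock_destruct(sk);	/* then let the inet layer release its own state */
}

static void example_install_destructor(struct sock *sk)
{
	sk->sk_destruct = example_destruct_sock;	/* set once at socket init time */
}
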
index bf3c6e8fc4017a64f93ff91c0ce96c8debbf44f4..9a5c4c9eddafa0e4f91cabb56847c8c80bf8b59b 100644 (file)
@@ -62,12 +62,12 @@ extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
-static int proc_sctp_do_hmac_alg(ctl_table *ctl,
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
                                int write,
                                void __user *buffer, size_t *lenp,
 
                                loff_t *ppos);
-static ctl_table sctp_table[] = {
+static struct ctl_table sctp_table[] = {
        {
                .procname       = "sctp_mem",
                .data           = &sysctl_sctp_mem,
@@ -93,7 +93,7 @@ static ctl_table sctp_table[] = {
        { /* sentinel */ }
 };
 
-static ctl_table sctp_net_table[] = {
+static struct ctl_table sctp_net_table[] = {
        {
                .procname       = "rto_initial",
                .data           = &init_net.sctp.rto_initial,
@@ -300,14 +300,14 @@ static ctl_table sctp_net_table[] = {
        { /* sentinel */ }
 };
 
-static int proc_sctp_do_hmac_alg(ctl_table *ctl,
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
                                int write,
                                void __user *buffer, size_t *lenp,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
        char tmp[8];
-       ctl_table tbl;
+       struct ctl_table tbl;
        int ret;
        int changed = 0;
        char *none = "none";
index 098f1d5f769e264b73aa8b3800ccf3a4eeb75c92..5d3c71bbd197b1804273f26658273c86fa9f9b4a 100644 (file)
@@ -116,7 +116,7 @@ struct sctp_transport *sctp_transport_new(struct net *net,
 {
        struct sctp_transport *transport;
 
-       transport = t_new(struct sctp_transport, gfp);
+       transport = kzalloc(sizeof(*transport), gfp);
        if (!transport)
                goto fail;
 
index 396c45174e5b696d90c6940d7aa0ce511c70293c..b46019568a86d15c71dc805c8d1b3e560866d726 100644 (file)
@@ -161,8 +161,8 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
 
 /* Initialize a Gap Ack Block iterator from memory being provided.  */
-SCTP_STATIC void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
-                                      struct sctp_tsnmap_iter *iter)
+static void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
+                                 struct sctp_tsnmap_iter *iter)
 {
        /* Only start looking one past the Cumulative TSN Ack Point.  */
        iter->start = map->cumulative_tsn_ack_point + 1;
@@ -171,9 +171,9 @@ SCTP_STATIC void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
 /* Get the next Gap Ack Blocks. Returns 0 if there was not another block
  * to get.
  */
-SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
-                                        struct sctp_tsnmap_iter *iter,
-                                        __u16 *start, __u16 *end)
+static int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
+                                   struct sctp_tsnmap_iter *iter,
+                                   __u16 *start, __u16 *end)
 {
        int ended = 0;
        __u16 start_ = 0, end_ = 0, offset;
index 10c018a5b9fee066c35c364c79cd6fef77e31580..44a45dbee4df37e835b258cb551e333aeb7582b9 100644 (file)
@@ -57,9 +57,9 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
 
 
 /* Initialize an ULP event from an given skb.  */
-SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
-                                   int msg_flags,
-                                   unsigned int len)
+static void sctp_ulpevent_init(struct sctp_ulpevent *event,
+                              int msg_flags,
+                              unsigned int len)
 {
        memset(event, 0, sizeof(struct sctp_ulpevent));
        event->msg_flags = msg_flags;
@@ -67,8 +67,8 @@ SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
 }
 
 /* Create a new sctp_ulpevent.  */
-SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
-                                                   gfp_t gfp)
+static struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
+                                              gfp_t gfp)
 {
        struct sctp_ulpevent *event;
        struct sk_buff *skb;
index 6b94633ca61d67e29faa58a7d42410e1ef678193..4da14cbd49b6486d08ec840e714b9f583835b685 100644 (file)
 #include <linux/route.h>
 #include <linux/sockios.h>
 #include <linux/atalk.h>
+#include <net/ll_poll.h>
+
+#ifdef CONFIG_NET_LL_RX_POLL
+unsigned int sysctl_net_ll_read __read_mostly;
+unsigned int sysctl_net_ll_poll __read_mostly;
+#endif
 
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -1142,13 +1148,24 @@ EXPORT_SYMBOL(sock_create_lite);
 /* No kernel lock held - perfect */
 static unsigned int sock_poll(struct file *file, poll_table *wait)
 {
+       unsigned int ll_flag = 0;
        struct socket *sock;
 
        /*
         *      We can't return errors to poll, so it's either yes or no.
         */
        sock = file->private_data;
-       return sock->ops->poll(file, sock, wait);
+
+       if (sk_valid_ll(sock->sk)) {
+               /* this socket can poll_ll so tell the system call */
+               ll_flag = POLL_LL;
+
+               /* once, only if requested by syscall */
+               if (wait && (wait->_key & POLL_LL))
+                       sk_poll_ll(sock->sk, 1);
+       }
+
+       return ll_flag | sock->ops->poll(file, sock, wait);
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1956,7 +1973,7 @@ struct used_address {
        unsigned int name_len;
 };
 
-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags,
                         struct used_address *used_address)
 {
@@ -2071,22 +2088,30 @@ out:
  *     BSD sendmsg interface
  */
 
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
 {
        int fput_needed, err;
        struct msghdr msg_sys;
-       struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       struct socket *sock;
 
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
 
-       err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+       err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
 
        fput_light(sock->file, fput_needed);
 out:
        return err;
 }
 
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+{
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_sendmsg(fd, msg, flags);
+}
+
 /*
  *     Linux sendmmsg interface
  */
@@ -2117,15 +2142,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 
        while (datagrams < vlen) {
                if (MSG_CMSG_COMPAT & flags) {
-                       err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
-                                           &msg_sys, flags, &used_address);
+                       err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+                                            &msg_sys, flags, &used_address);
                        if (err < 0)
                                break;
                        err = __put_user(err, &compat_entry->msg_len);
                        ++compat_entry;
                } else {
-                       err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
-                                           &msg_sys, flags, &used_address);
+                       err = ___sys_sendmsg(sock,
+                                            (struct msghdr __user *)entry,
+                                            &msg_sys, flags, &used_address);
                        if (err < 0)
                                break;
                        err = put_user(err, &entry->msg_len);
@@ -2149,10 +2175,12 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
                unsigned int, vlen, unsigned int, flags)
 {
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
        return __sys_sendmmsg(fd, mmsg, vlen, flags);
 }
 
-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags, int nosec)
 {
        struct compat_msghdr __user *msg_compat =
@@ -2244,23 +2272,31 @@ out:
  *     BSD recvmsg interface
  */
 
-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
-               unsigned int, flags)
+long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
 {
        int fput_needed, err;
        struct msghdr msg_sys;
-       struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       struct socket *sock;
 
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
 
-       err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+       err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
 
        fput_light(sock->file, fput_needed);
 out:
        return err;
 }
 
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+               unsigned int, flags)
+{
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_recvmsg(fd, msg, flags);
+}
+
 /*
  *     Linux recvmmsg interface
  */
@@ -2298,17 +2334,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
                 * No need to ask LSM for more than the first datagram.
                 */
                if (MSG_CMSG_COMPAT & flags) {
-                       err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
-                                           &msg_sys, flags & ~MSG_WAITFORONE,
-                                           datagrams);
+                       err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+                                            &msg_sys, flags & ~MSG_WAITFORONE,
+                                            datagrams);
                        if (err < 0)
                                break;
                        err = __put_user(err, &compat_entry->msg_len);
                        ++compat_entry;
                } else {
-                       err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
-                                           &msg_sys, flags & ~MSG_WAITFORONE,
-                                           datagrams);
+                       err = ___sys_recvmsg(sock,
+                                            (struct msghdr __user *)entry,
+                                            &msg_sys, flags & ~MSG_WAITFORONE,
+                                            datagrams);
                        if (err < 0)
                                break;
                        err = put_user(err, &entry->msg_len);
@@ -2375,6 +2412,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
        int datagrams;
        struct timespec timeout_sys;
 
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+
        if (!timeout)
                return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
 
@@ -2612,7 +2652,9 @@ static int __init sock_init(void)
         */
 
 #ifdef CONFIG_NETFILTER
-       netfilter_init();
+       err = netfilter_init();
+       if (err)
+               goto out;
 #endif
 
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
index 7da6b457f66abfab016fd8b21aeedcb14d5e7ff0..fc2f78d6a9b46fae51a3ba366bc50c23e9238101 100644 (file)
@@ -52,6 +52,8 @@
 #include <linux/sunrpc/gss_api.h>
 #include <asm/uaccess.h>
 
+#include "../netns.h"
+
 static const struct rpc_authops authgss_ops;
 
 static const struct rpc_credops gss_credops;
@@ -85,8 +87,6 @@ struct gss_auth {
 };
 
 /* pipe_version >= 0 if and only if someone has a pipe open. */
-static int pipe_version = -1;
-static atomic_t pipe_users = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(pipe_version_lock);
 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
@@ -266,24 +266,27 @@ struct gss_upcall_msg {
        char databuf[UPCALL_BUF_LEN];
 };
 
-static int get_pipe_version(void)
+static int get_pipe_version(struct net *net)
 {
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret;
 
        spin_lock(&pipe_version_lock);
-       if (pipe_version >= 0) {
-               atomic_inc(&pipe_users);
-               ret = pipe_version;
+       if (sn->pipe_version >= 0) {
+               atomic_inc(&sn->pipe_users);
+               ret = sn->pipe_version;
        } else
                ret = -EAGAIN;
        spin_unlock(&pipe_version_lock);
        return ret;
 }
 
-static void put_pipe_version(void)
+static void put_pipe_version(struct net *net)
 {
-       if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
-               pipe_version = -1;
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+       if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
+               sn->pipe_version = -1;
                spin_unlock(&pipe_version_lock);
        }
 }
@@ -291,9 +294,10 @@ static void put_pipe_version(void)
 static void
 gss_release_msg(struct gss_upcall_msg *gss_msg)
 {
+       struct net *net = rpc_net_ns(gss_msg->auth->client);
        if (!atomic_dec_and_test(&gss_msg->count))
                return;
-       put_pipe_version();
+       put_pipe_version(net);
        BUG_ON(!list_empty(&gss_msg->list));
        if (gss_msg->ctx != NULL)
                gss_put_ctx(gss_msg->ctx);
@@ -439,7 +443,10 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
                                struct rpc_clnt *clnt,
                                const char *service_name)
 {
-       if (pipe_version == 0)
+       struct net *net = rpc_net_ns(clnt);
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+       if (sn->pipe_version == 0)
                gss_encode_v0_msg(gss_msg);
        else /* pipe_version == 1 */
                gss_encode_v1_msg(gss_msg, clnt, service_name);
@@ -455,7 +462,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
        gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
        if (gss_msg == NULL)
                return ERR_PTR(-ENOMEM);
-       vers = get_pipe_version();
+       vers = get_pipe_version(rpc_net_ns(clnt));
        if (vers < 0) {
                kfree(gss_msg);
                return ERR_PTR(vers);
@@ -559,24 +566,34 @@ out:
 static inline int
 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 {
+       struct net *net = rpc_net_ns(gss_auth->client);
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct rpc_pipe *pipe;
        struct rpc_cred *cred = &gss_cred->gc_base;
        struct gss_upcall_msg *gss_msg;
+       unsigned long timeout;
        DEFINE_WAIT(wait);
-       int err = 0;
+       int err;
 
        dprintk("RPC:       %s for uid %u\n",
                __func__, from_kuid(&init_user_ns, cred->cr_uid));
 retry:
+       err = 0;
+       /* Default timeout is 15s unless we know that gssd is not running */
+       timeout = 15 * HZ;
+       if (!sn->gssd_running)
+               timeout = HZ >> 2;
        gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
        if (PTR_ERR(gss_msg) == -EAGAIN) {
                err = wait_event_interruptible_timeout(pipe_version_waitqueue,
-                               pipe_version >= 0, 15*HZ);
-               if (pipe_version < 0) {
+                               sn->pipe_version >= 0, timeout);
+               if (sn->pipe_version < 0) {
+                       if (err == 0)
+                               sn->gssd_running = 0;
                        warn_gssd();
                        err = -EACCES;
                }
-               if (err)
+               if (err < 0)
                        goto out;
                goto retry;
        }
@@ -707,20 +724,22 @@ out:
 
 static int gss_pipe_open(struct inode *inode, int new_version)
 {
+       struct net *net = inode->i_sb->s_fs_info;
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret = 0;
 
        spin_lock(&pipe_version_lock);
-       if (pipe_version < 0) {
+       if (sn->pipe_version < 0) {
                /* First open of any gss pipe determines the version: */
-               pipe_version = new_version;
+               sn->pipe_version = new_version;
                rpc_wake_up(&pipe_version_rpc_waitqueue);
                wake_up(&pipe_version_waitqueue);
-       } else if (pipe_version != new_version) {
+       } else if (sn->pipe_version != new_version) {
                /* Trying to open a pipe of a different version */
                ret = -EBUSY;
                goto out;
        }
-       atomic_inc(&pipe_users);
+       atomic_inc(&sn->pipe_users);
 out:
        spin_unlock(&pipe_version_lock);
        return ret;
@@ -740,6 +759,7 @@ static int gss_pipe_open_v1(struct inode *inode)
 static void
 gss_pipe_release(struct inode *inode)
 {
+       struct net *net = inode->i_sb->s_fs_info;
        struct rpc_pipe *pipe = RPC_I(inode)->pipe;
        struct gss_upcall_msg *gss_msg;
 
@@ -758,7 +778,7 @@ restart:
        }
        spin_unlock(&pipe->lock);
 
-       put_pipe_version();
+       put_pipe_version(net);
 }
 
 static void
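
The auth_gss.c hunks convert the global pipe_version/pipe_users pair into fields of the per-namespace struct sunrpc_net, looked up through net_generic(). Below is a minimal sketch of that per-netns lookup pattern with a hypothetical private struct; net_generic() and the pernet registration machinery are real kernel APIs, everything named example_* is illustrative.

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_pernet {
	int pipe_version;	/* formerly a file-scope global */
};

static int example_net_id;	/* filled in by register_pernet_subsys() elsewhere */

static int example_get_pipe_version(struct net *net)
{
	struct example_pernet *ep = net_generic(net, example_net_id);

	return ep->pipe_version;
}
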
index 871c73c921654b14b14eface6b4f2cfe0f27d5d2..29b4ba93ab3cb795fb8dd307f6e248bfcc3a1495 100644 (file)
@@ -1287,7 +1287,7 @@ static bool use_gss_proxy(struct net *net)
 
 #ifdef CONFIG_PROC_FS
 
-static bool set_gss_proxy(struct net *net, int type)
+static int set_gss_proxy(struct net *net, int type)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret = 0;
@@ -1317,10 +1317,12 @@ static inline bool gssp_ready(struct sunrpc_net *sn)
        return false;
 }
 
-static int wait_for_gss_proxy(struct net *net)
+static int wait_for_gss_proxy(struct net *net, struct file *file)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
+       if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
+               return -EAGAIN;
        return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
 }
 
@@ -1362,7 +1364,7 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
        size_t len;
        int ret;
 
-       ret = wait_for_gss_proxy(net);
+       ret = wait_for_gss_proxy(net, file);
        if (ret)
                return ret;
 
index 7111a4c9113baceef38042da1ff41a100ff58b21..74d948f5d5a1d399d13061ee5c910c2cacc6f720 100644 (file)
@@ -28,7 +28,11 @@ struct sunrpc_net {
        wait_queue_head_t gssp_wq;
        struct rpc_clnt *gssp_clnt;
        int use_gss_proxy;
+       int pipe_version;
+       atomic_t pipe_users;
        struct proc_dir_entry *use_gssp_proc;
+
+       unsigned int gssd_running;
 };
 
 extern int sunrpc_net_id;
index a9129f8d70706f5e33ac8f39cb3ea8a3e5825ca2..e7ce4b3eb0bdde4f209ba2cdf6cd1e3dbcca03f9 100644 (file)
@@ -216,11 +216,14 @@ rpc_destroy_inode(struct inode *inode)
 static int
 rpc_pipe_open(struct inode *inode, struct file *filp)
 {
+       struct net *net = inode->i_sb->s_fs_info;
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct rpc_pipe *pipe;
        int first_open;
        int res = -ENXIO;
 
        mutex_lock(&inode->i_mutex);
+       sn->gssd_running = 1;
        pipe = RPC_I(inode)->pipe;
        if (pipe == NULL)
                goto out;
@@ -1069,6 +1072,8 @@ void rpc_pipefs_init_net(struct net *net)
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
        mutex_init(&sn->pipefs_sb_lock);
+       sn->gssd_running = 1;
+       sn->pipe_version = -1;
 }
 
 /*
index f8529fc8e54275c5b7b9809f0219f20608ffb472..5356b120dbf8e2fe61ba88081d1fab1941f135a9 100644 (file)
@@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
  * Note: If the task is ASYNC, and is being made runnable after sitting on an
  * rpc_wait_queue, this must be called with the queue spinlock held to protect
  * the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
+       bool need_wakeup = !rpc_test_and_set_running(task);
+
        rpc_clear_queued(task);
-       if (rpc_test_and_set_running(task))
+       if (!need_wakeup)
                return;
        if (RPC_IS_ASYNC(task)) {
                INIT_WORK(&task->u.tk_work, rpc_async_schedule);
index c3f9e1ef7f531857f432993896d13d0ae8442876..06bdf5a1082c850030650fc3957f47ef75ebda8a 100644 (file)
@@ -810,11 +810,15 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
                goto badcred;
        argv->iov_base = (void*)((__be32*)argv->iov_base + slen);       /* skip machname */
        argv->iov_len -= slen*4;
-
+       /*
+        * Note: we skip uid_valid()/gid_valid() checks here for
+        * backwards compatibility with clients that use -1 id's.
+        * Instead, -1 uid or gid is later mapped to the
+        * (export-specific) anonymous id by nfsd_setuser.
+        * Supplementary gid's will be left alone.
+        */
        cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
        cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
-       if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
-               goto badcred;
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > 16 || (len -= (slen + 2)*4) < 0)
                goto badcred;
@@ -823,8 +827,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
                return SVC_CLOSE;
        for (i = 0; i < slen; i++) {
                kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
-               if (!gid_valid(kgid))
-                       goto badcred;
                GROUP_AT(cred->cr_group_info, i) = kgid;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
index af7d339add9d5b853174ef8cac0a6016419fc663..c99c58e2ee66155173338a43aec6aff48dc8614e 100644 (file)
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nlm_debug);
 #ifdef RPC_DEBUG
 
 static struct ctl_table_header *sunrpc_table_header;
-static ctl_table               sunrpc_table[];
+static struct ctl_table sunrpc_table[];
 
 void
 rpc_register_sysctl(void)
@@ -58,7 +58,7 @@ rpc_unregister_sysctl(void)
        }
 }
 
-static int proc_do_xprt(ctl_table *table, int write,
+static int proc_do_xprt(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        char tmpbuf[256];
@@ -73,7 +73,7 @@ static int proc_do_xprt(ctl_table *table, int write,
 }
 
 static int
-proc_dodebug(ctl_table *table, int write,
+proc_dodebug(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        char            tmpbuf[20], c, *s;
@@ -135,7 +135,7 @@ done:
 }
 
 
-static ctl_table debug_table[] = {
+static struct ctl_table debug_table[] = {
        {
                .procname       = "rpc_debug",
                .data           = &rpc_debug,
@@ -173,7 +173,7 @@ static ctl_table debug_table[] = {
        { }
 };
 
-static ctl_table sunrpc_table[] = {
+static struct ctl_table sunrpc_table[] = {
        {
                .procname       = "sunrpc",
                .mode           = 0555,
index 8343737e85f4d87136fd4adff6c0d4ba296e0c2c..c1b6270262c297f8af8c902c40eec32aa6eae215 100644 (file)
@@ -84,7 +84,7 @@ struct workqueue_struct *svc_rdma_wq;
  * resets the associated statistic to zero. Any read returns it's
  * current value.
  */
-static int read_reset_stat(ctl_table *table, int write,
+static int read_reset_stat(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp,
                           loff_t *ppos)
 {
@@ -119,7 +119,7 @@ static int read_reset_stat(ctl_table *table, int write,
 }
 
 static struct ctl_table_header *svcrdma_table_header;
-static ctl_table svcrdma_parm_table[] = {
+static struct ctl_table svcrdma_parm_table[] = {
        {
                .procname       = "max_requests",
                .data           = &svcrdma_max_requests,
@@ -214,7 +214,7 @@ static ctl_table svcrdma_parm_table[] = {
        { },
 };
 
-static ctl_table svcrdma_table[] = {
+static struct ctl_table svcrdma_table[] = {
        {
                .procname       = "svc_rdma",
                .mode           = 0555,
@@ -223,7 +223,7 @@ static ctl_table svcrdma_table[] = {
        { },
 };
 
-static ctl_table svcrdma_root_table[] = {
+static struct ctl_table svcrdma_root_table[] = {
        {
                .procname       = "sunrpc",
                .mode           = 0555,
index 794312f22b9badcb10f3e98d9563672f2b08e534..285dc08841159e609524897e6efcce564a1d25c7 100644 (file)
@@ -86,7 +86,7 @@ static unsigned int max_memreg = RPCRDMA_LAST - 1;
 
 static struct ctl_table_header *sunrpc_table_header;
 
-static ctl_table xr_tunables_table[] = {
+static struct ctl_table xr_tunables_table[] = {
        {
                .procname       = "rdma_slot_table_entries",
                .data           = &xprt_rdma_slot_table_entries,
@@ -138,7 +138,7 @@ static ctl_table xr_tunables_table[] = {
        { },
 };
 
-static ctl_table sunrpc_table[] = {
+static struct ctl_table sunrpc_table[] = {
        {
                .procname       = "sunrpc",
                .mode           = 0555,
index ffd50348a509e392fa2255f17f35a9291239e91c..412de7cfcc80494130b3cd611f698f335e399e4a 100644 (file)
@@ -87,7 +87,7 @@ static struct ctl_table_header *sunrpc_table_header;
  * FIXME: changing the UDP slot table size should also resize the UDP
  *        socket buffers for existing UDP transports
  */
-static ctl_table xs_tunables_table[] = {
+static struct ctl_table xs_tunables_table[] = {
        {
                .procname       = "udp_slot_table_entries",
                .data           = &xprt_udp_slot_table_entries,
@@ -143,7 +143,7 @@ static ctl_table xs_tunables_table[] = {
        { },
 };
 
-static ctl_table sunrpc_table[] = {
+static struct ctl_table sunrpc_table[] = {
        {
                .procname       = "sunrpc",
                .mode           = 0555,
index 4df8e02d900823cbb417e1f032bee672f9081966..b282f7130d2bb51f0dee12e4800a9a0ad54a33a7 100644 (file)
@@ -8,6 +8,7 @@ tipc-y  += addr.o bcast.o bearer.o config.o \
           core.o handler.o link.o discover.o msg.o  \
           name_distr.o  subscr.o name_table.o net.o  \
           netlink.o node.o node_subscr.o port.o ref.o  \
-          socket.o log.o eth_media.o
+          socket.o log.o eth_media.o server.o
 
 tipc-$(CONFIG_TIPC_MEDIA_IB)   += ib_media.o
+tipc-$(CONFIG_SYSCTL)          += sysctl.o
index e5f3da507823678240df70fa26404b8d7ae00d36..716de1ac6cb560c8cc55a257a0d76d39230634f3 100644 (file)
@@ -578,8 +578,7 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
  * Returns 0 (packet sent successfully) under all circumstances,
  * since the broadcast link's pseudo-bearer never blocks
  */
-static int tipc_bcbearer_send(struct sk_buff *buf,
-                             struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
 {
        int bp_index;
index a93306557e00c7a1d0522d8f2e3b2654dd7cf10b..6ee587b469fd3fd9df70a7f086f1f5485dc5caa6 100644 (file)
@@ -75,7 +75,8 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
 /**
  * tipc_nmap_equal - test for equality of node maps
  */
-static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
+static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
+                                 struct tipc_node_map *nm_b)
 {
        return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
index f67866c765dd574130bb17d5476c8c9723d4612a..c301a9a592d82d570050116df07e54a4551da537 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2012, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "port.h"
 #include "name_table.h"
 #include "config.h"
+#include "server.h"
 
 #define REPLY_TRUNCATED "<truncated>\n"
 
-static u32 config_port_ref;
-
-static DEFINE_SPINLOCK(config_lock);
+static DEFINE_MUTEX(config_mutex);
+static struct tipc_server cfgsrv;
 
 static const void *req_tlv_area;       /* request message TLV area */
 static int req_tlv_space;              /* request message TLV area size */
@@ -181,18 +181,7 @@ static struct sk_buff *cfg_set_own_addr(void)
        if (tipc_own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change node address once assigned)");
-
-       /*
-        * Must temporarily release configuration spinlock while switching into
-        * networking mode as it calls tipc_eth_media_start(), which may sleep.
-        * Releasing the lock is harmless as other locally-issued configuration
-        * commands won't occur until this one completes, and remotely-issued
-        * configuration commands can't be received until a local configuration
-        * command to enable the first bearer is received and processed.
-        */
-       spin_unlock_bh(&config_lock);
        tipc_core_start_net(addr);
-       spin_lock_bh(&config_lock);
        return tipc_cfg_reply_none();
 }
 
@@ -248,7 +237,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 {
        struct sk_buff *rep_tlv_buf;
 
-       spin_lock_bh(&config_lock);
+       mutex_lock(&config_mutex);
 
        /* Save request and reply details in a well-known location */
        req_tlv_area = request_area;
@@ -377,37 +366,31 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 
        /* Return reply buffer */
 exit:
-       spin_unlock_bh(&config_lock);
+       mutex_unlock(&config_mutex);
        return rep_tlv_buf;
 }
 
-static void cfg_named_msg_event(void *userdata,
-                               u32 port_ref,
-                               struct sk_buff **buf,
-                               const unchar *msg,
-                               u32 size,
-                               u32 importance,
-                               struct tipc_portid const *orig,
-                               struct tipc_name_seq const *dest)
+static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+                              void *usr_data, void *buf, size_t len)
 {
        struct tipc_cfg_msg_hdr *req_hdr;
        struct tipc_cfg_msg_hdr *rep_hdr;
        struct sk_buff *rep_buf;
+       int ret;
 
        /* Validate configuration message header (ignore invalid message) */
-       req_hdr = (struct tipc_cfg_msg_hdr *)msg;
-       if ((size < sizeof(*req_hdr)) ||
-           (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
+       req_hdr = (struct tipc_cfg_msg_hdr *)buf;
+       if ((len < sizeof(*req_hdr)) ||
+           (len != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
            (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
                pr_warn("Invalid configuration message discarded\n");
                return;
        }
 
        /* Generate reply for request (if can't, return request) */
-       rep_buf = tipc_cfg_do_cmd(orig->node,
-                                 ntohs(req_hdr->tcm_type),
-                                 msg + sizeof(*req_hdr),
-                                 size - sizeof(*req_hdr),
+       rep_buf = tipc_cfg_do_cmd(addr->addr.id.node, ntohs(req_hdr->tcm_type),
+                                 buf + sizeof(*req_hdr),
+                                 len - sizeof(*req_hdr),
                                  BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
        if (rep_buf) {
                skb_push(rep_buf, sizeof(*rep_hdr));
@@ -415,57 +398,51 @@ static void cfg_named_msg_event(void *userdata,
                memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
                rep_hdr->tcm_len = htonl(rep_buf->len);
                rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
-       } else {
-               rep_buf = *buf;
-               *buf = NULL;
-       }
 
-       /* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
-       tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
+               ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+                                       rep_buf->len);
+               if (ret < 0)
+                       pr_err("Sending cfg reply message failed, no memory\n");
+
+               kfree_skb(rep_buf);
+       }
 }
 
+static struct sockaddr_tipc cfgsrv_addr __read_mostly = {
+       .family                 = AF_TIPC,
+       .addrtype               = TIPC_ADDR_NAMESEQ,
+       .addr.nameseq.type      = TIPC_CFG_SRV,
+       .addr.nameseq.lower     = 0,
+       .addr.nameseq.upper     = 0,
+       .scope                  = TIPC_ZONE_SCOPE
+};
+
+static struct tipc_server cfgsrv __read_mostly = {
+       .saddr                  = &cfgsrv_addr,
+       .imp                    = TIPC_CRITICAL_IMPORTANCE,
+       .type                   = SOCK_RDM,
+       .max_rcvbuf_size        = 64 * 1024,
+       .name                   = "cfg_server",
+       .tipc_conn_recvmsg      = cfg_conn_msg_event,
+       .tipc_conn_new          = NULL,
+       .tipc_conn_shutdown     = NULL
+};
+
 int tipc_cfg_init(void)
 {
-       struct tipc_name_seq seq;
-       int res;
-
-       res = tipc_createport(NULL, TIPC_CRITICAL_IMPORTANCE,
-                             NULL, NULL, NULL,
-                             NULL, cfg_named_msg_event, NULL,
-                             NULL, &config_port_ref);
-       if (res)
-               goto failed;
-
-       seq.type = TIPC_CFG_SRV;
-       seq.lower = seq.upper = tipc_own_addr;
-       res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
-       if (res)
-               goto failed;
-
-       return 0;
-
-failed:
-       pr_err("Unable to create configuration service\n");
-       return res;
+       return tipc_server_start(&cfgsrv);
 }
 
 void tipc_cfg_reinit(void)
 {
-       struct tipc_name_seq seq;
-       int res;
-
-       seq.type = TIPC_CFG_SRV;
-       seq.lower = seq.upper = 0;
-       tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq);
+       tipc_server_stop(&cfgsrv);
 
-       seq.lower = seq.upper = tipc_own_addr;
-       res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
-       if (res)
-               pr_err("Unable to reinitialize configuration service\n");
+       cfgsrv_addr.addr.nameseq.lower = tipc_own_addr;
+       cfgsrv_addr.addr.nameseq.upper = tipc_own_addr;
+       tipc_server_start(&cfgsrv);
 }
 
 void tipc_cfg_stop(void)
 {
-       tipc_deleteport(config_port_ref);
-       config_port_ref = 0;
+       tipc_server_stop(&cfgsrv);
 }
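The config.c changes above swap the config_lock spinlock for config_mutex and route configuration requests through the new tipc_server instance (cfgsrv) instead of a native-API port. The removed comment explains why the spinlock previously had to be dropped around tipc_core_start_net(): that call may sleep, which is not allowed under spin_lock_bh(). A mutex is taken in process context and may be held across sleeping calls, so the unlock/relock dance disappears. A minimal sketch of the resulting locking pattern, with illustrative names not taken from the patch:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_cfg_mutex);

	static int example_do_config(int (*blocking_op)(void))
	{
		int ret;

		/* Process context: sleeping while holding a mutex is allowed,
		 * so a blocking operation can stay inside the critical section.
		 */
		mutex_lock(&example_cfg_mutex);
		ret = blocking_op();
		mutex_unlock(&example_cfg_mutex);
		return ret;
	}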
index 7ec2c1eb94f17a40db04bb004a6abf35f2fc25cf..fd4eeeaa972a6f4226f1dce420d7fb0b17670c78 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/core.c: TIPC module code
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,7 @@
 #include "name_table.h"
 #include "subscr.h"
 #include "config.h"
+#include "port.h"
 
 #include <linux/module.h>
 
@@ -50,7 +51,7 @@ u32 tipc_own_addr __read_mostly;
 int tipc_max_ports __read_mostly;
 int tipc_net_id __read_mostly;
 int tipc_remote_management __read_mostly;
-
+int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
 
 /**
  * tipc_buf_acquire - creates a TIPC message buffer
@@ -118,6 +119,7 @@ static void tipc_core_stop(void)
        tipc_nametbl_stop();
        tipc_ref_table_stop();
        tipc_socket_stop();
+       tipc_unregister_sysctl();
 }
 
 /**
@@ -134,21 +136,22 @@ static int tipc_core_start(void)
                res = tipc_ref_table_init(tipc_max_ports, tipc_random);
        if (!res)
                res = tipc_nametbl_init();
-       if (!res)
-               res = tipc_subscr_start();
-       if (!res)
-               res = tipc_cfg_init();
        if (!res)
                res = tipc_netlink_start();
        if (!res)
                res = tipc_socket_init();
+       if (!res)
+               res = tipc_register_sysctl();
+       if (!res)
+               res = tipc_subscr_start();
+       if (!res)
+               res = tipc_cfg_init();
        if (res)
                tipc_core_stop();
 
        return res;
 }
 
-
 static int __init tipc_init(void)
 {
        int res;
@@ -160,6 +163,11 @@ static int __init tipc_init(void)
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
+       sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE;
+       sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_CRITICAL_IMPORTANCE;
+       sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT;
+
        res = tipc_core_start();
        if (res)
                pr_err("Unable to start in single node mode\n");
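The new sysctl_tipc_rmem[] triple follows the min/default/max convention used by other *mem sysctls. Assuming the usual importance constants from the TIPC uapi header (TIPC_LOW_IMPORTANCE == 0, TIPC_CRITICAL_IMPORTANCE == 3), the initialisation in tipc_init() above reduces to the following, restated here only for readability:

	/* Equivalent to the assignments in tipc_init(), assuming
	 * TIPC_LOW_IMPORTANCE == 0 and TIPC_CRITICAL_IMPORTANCE == 3.
	 */
	sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4;        /* min:     limit / 16 */
	sysctl_tipc_rmem[1] = (CONN_OVERLOAD_LIMIT >> 4) << 3; /* default: limit / 2  */
	sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT;             /* max:     limit      */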
index 0207db04179a00feecf6f2cc04ded91f40f1db45..be72f8cebc536a425a79d99a5435fa10f8e58b89 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * net/tipc/core.h: Include file for TIPC global declarations
  *
- * Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2013 Ericsson AB
+ * Copyright (c) 2005-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -80,6 +80,7 @@ extern u32 tipc_own_addr __read_mostly;
 extern int tipc_max_ports __read_mostly;
 extern int tipc_net_id __read_mostly;
 extern int tipc_remote_management __read_mostly;
+extern int sysctl_tipc_rmem[3] __read_mostly;
 
 /*
  * Other global variables
@@ -96,6 +97,18 @@ extern int  tipc_netlink_start(void);
 extern void tipc_netlink_stop(void);
 extern int  tipc_socket_init(void);
 extern void tipc_socket_stop(void);
+extern int tipc_sock_create_local(int type, struct socket **res);
+extern void tipc_sock_release_local(struct socket *sock);
+extern int tipc_sock_accept_local(struct socket *sock,
+                                 struct socket **newsock, int flags);
+
+#ifdef CONFIG_SYSCTL
+extern int tipc_register_sysctl(void);
+extern void tipc_unregister_sysctl(void);
+#else
+#define tipc_register_sysctl() 0
+#define tipc_unregister_sysctl()
+#endif
 
 /*
  * TIPC timer and signal code
index eedff58d03877c6eaae13feb6a5cab2508659a10..ecc758c6eacfced941fb90b923d2cfd94783a315 100644 (file)
@@ -70,8 +70,7 @@ struct tipc_link_req {
  * @dest_domain: network domain of node(s) which should respond to message
  * @b_ptr: ptr to bearer issuing message
  */
-static struct sk_buff *tipc_disc_init_msg(u32 type,
-                                         u32 dest_domain,
+static struct sk_buff *tipc_disc_init_msg(u32 type, u32 dest_domain,
                                          struct tipc_bearer *b_ptr)
 {
        struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
@@ -346,8 +345,8 @@ exit:
  *
  * Returns 0 if successful, otherwise -errno.
  */
-int tipc_disc_create(struct tipc_bearer *b_ptr,
-                    struct tipc_media_addr *dest, u32 dest_domain)
+int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
+                    u32 dest_domain)
 {
        struct tipc_link_req *req;
 
index 120a676a3360173311a7acb0a6499f0db921d5d9..40ea40cf6204506f4a579f99d2a0c3d8e79652ca 100644 (file)
@@ -62,7 +62,7 @@ static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
 
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
-                             void *dv);
+                            void *dv);
 /*
  * Network device notifier info
  */
@@ -162,8 +162,7 @@ static void setup_bearer(struct work_struct *work)
  */
 static int enable_bearer(struct tipc_bearer *tb_ptr)
 {
-       struct net_device *dev = NULL;
-       struct net_device *pdev = NULL;
+       struct net_device *dev;
        struct eth_bearer *eb_ptr = &eth_bearers[0];
        struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
        char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
@@ -178,15 +177,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        }
 
        /* Find device with specified name */
-       read_lock(&dev_base_lock);
-       for_each_netdev(&init_net, pdev) {
-               if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
-                       dev = pdev;
-                       dev_hold(dev);
-                       break;
-               }
-       }
-       read_unlock(&dev_base_lock);
+       dev = dev_get_by_name(&init_net, driver_name);
        if (!dev)
                return -ENODEV;
 
@@ -251,9 +242,9 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
  * specified device.
  */
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
-                            void *dv)
+                            void *ptr)
 {
-       struct net_device *dev = (struct net_device *)dv;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct eth_bearer *eb_ptr = &eth_bearers[0];
        struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
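Here, and in the ib_media.c hunks that follow, the open-coded for_each_netdev() scan under dev_base_lock is replaced by dev_get_by_name(), which performs the same lookup and takes the device reference in one call; the notifier handlers likewise switch to netdev_notifier_info_to_dev() to extract the device from the notifier payload. A hedged sketch of the lookup idiom, with an illustrative helper name:

	#include <linux/netdevice.h>
	#include <net/net_namespace.h>

	static int example_lookup_device(const char *name)
	{
		struct net_device *dev;

		/* Finds the device by name in init_net and holds a reference */
		dev = dev_get_by_name(&init_net, name);
		if (!dev)
			return -ENODEV;

		/* ... use dev while the reference is held ... */

		dev_put(dev);	/* release the reference taken above */
		return 0;
	}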
 
index 2a2864c25e15c6d9282357c3685008c67a73197a..ad2e1ec4117e316de8a497ab7e83a7b4fe6a8eb8 100644 (file)
@@ -155,8 +155,7 @@ static void setup_bearer(struct work_struct *work)
  */
 static int enable_bearer(struct tipc_bearer *tb_ptr)
 {
-       struct net_device *dev = NULL;
-       struct net_device *pdev = NULL;
+       struct net_device *dev;
        struct ib_bearer *ib_ptr = &ib_bearers[0];
        struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
        char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
@@ -171,15 +170,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        }
 
        /* Find device with specified name */
-       read_lock(&dev_base_lock);
-       for_each_netdev(&init_net, pdev) {
-               if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
-                       dev = pdev;
-                       dev_hold(dev);
-                       break;
-               }
-       }
-       read_unlock(&dev_base_lock);
+       dev = dev_get_by_name(&init_net, driver_name);
        if (!dev)
                return -ENODEV;
 
@@ -244,9 +235,9 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
  * specified device.
  */
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
-                            void *dv)
+                            void *ptr)
 {
-       struct net_device *dev = (struct net_device *)dv;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ib_bearer *ib_ptr = &ib_bearers[0];
        struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
 
index a80feee5197a1c8a79a9397091ff9d0d72b09c22..0cc3d9015c5d5bb6a1629251d7a636f41239db6f 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/link.c: TIPC link code
  *
  * Copyright (c) 1996-2007, 2012, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -41,6 +41,8 @@
 #include "discover.h"
 #include "config.h"
 
+#include <linux/pkt_sched.h>
+
 /*
  * Error message prefixes
  */
@@ -771,8 +773,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
  * link_bundle_buf(): Append contents of a buffer to
  * the tail of an existing one.
  */
-static int link_bundle_buf(struct tipc_link *l_ptr,
-                          struct sk_buff *bundler,
+static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
                           struct sk_buff *buf)
 {
        struct tipc_msg *bundler_msg = buf_msg(bundler);
@@ -1056,40 +1057,6 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
        return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
 }
 
-/*
- * tipc_send_buf_fast: Entry for data messages where the
- * destination node is known and the header is complete,
- * inclusive total message length.
- * Returns user data length.
- */
-int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
-{
-       struct tipc_link *l_ptr;
-       struct tipc_node *n_ptr;
-       int res;
-       u32 selector = msg_origport(buf_msg(buf)) & 1;
-       u32 dummy;
-
-       read_lock_bh(&tipc_net_lock);
-       n_ptr = tipc_node_find(destnode);
-       if (likely(n_ptr)) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[selector];
-               if (likely(l_ptr)) {
-                       res = link_send_buf_fast(l_ptr, buf, &dummy);
-                       tipc_node_unlock(n_ptr);
-                       read_unlock_bh(&tipc_net_lock);
-                       return res;
-               }
-               tipc_node_unlock(n_ptr);
-       }
-       read_unlock_bh(&tipc_net_lock);
-       res = msg_data_sz(buf_msg(buf));
-       tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
-       return res;
-}
-
-
 /*
  * tipc_link_send_sections_fast: Entry for messages where the
  * destination processor is known and the header is complete,
@@ -1098,8 +1065,7 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
  */
 int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
-                                const u32 num_sect,
-                                unsigned int total_len,
+                                const u32 num_sect, unsigned int total_len,
                                 u32 destaddr)
 {
        struct tipc_msg *hdr = &sender->phdr;
@@ -1115,7 +1081,10 @@ again:
         * (Must not hold any locks while building message.)
         */
        res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
-                            sender->max_pkt, !sender->user_port, &buf);
+                            sender->max_pkt, &buf);
+       /* Exit if build request was invalid */
+       if (unlikely(res < 0))
+               return res;
 
        read_lock_bh(&tipc_net_lock);
        node = tipc_node_find(destaddr);
@@ -1132,10 +1101,6 @@ exit:
                                return res;
                        }
 
-                       /* Exit if build request was invalid */
-                       if (unlikely(res < 0))
-                               goto exit;
-
                        /* Exit if link (or bearer) is congested */
                        if (link_congested(l_ptr) ||
                            tipc_bearer_blocked(l_ptr->b_ptr)) {
@@ -1189,8 +1154,7 @@ exit:
  */
 static int link_send_sections_long(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
-                                  u32 num_sect,
-                                  unsigned int total_len,
+                                  u32 num_sect, unsigned int total_len,
                                   u32 destaddr)
 {
        struct tipc_link *l_ptr;
@@ -1204,6 +1168,7 @@ static int link_send_sections_long(struct tipc_port *sender,
        const unchar *sect_crs;
        int curr_sect;
        u32 fragm_no;
+       int res = 0;
 
 again:
        fragm_no = 1;
@@ -1250,18 +1215,15 @@ again:
                else
                        sz = fragm_rest;
 
-               if (likely(!sender->user_port)) {
-                       if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+               if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+                       res = -EFAULT;
 error:
-                               for (; buf_chain; buf_chain = buf) {
-                                       buf = buf_chain->next;
-                                       kfree_skb(buf_chain);
-                               }
-                               return -EFAULT;
+                       for (; buf_chain; buf_chain = buf) {
+                               buf = buf_chain->next;
+                               kfree_skb(buf_chain);
                        }
-               } else
-                       skb_copy_to_linear_data_offset(buf, fragm_crs,
-                                                      sect_crs, sz);
+                       return res;
+               }
                sect_crs += sz;
                sect_rest -= sz;
                fragm_crs += sz;
@@ -1281,8 +1243,10 @@ error:
                        msg_set_fragm_no(&fragm_hdr, ++fragm_no);
                        prev = buf;
                        buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
-                       if (!buf)
+                       if (!buf) {
+                               res = -ENOMEM;
                                goto error;
+                       }
 
                        buf->next = NULL;
                        prev->next = buf;
@@ -1446,7 +1410,7 @@ static void link_reset_all(unsigned long addr)
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
-                                       struct sk_buff *buf)
+                                   struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
 
@@ -1901,8 +1865,8 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
  * Send protocol message to the other endpoint.
  */
 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
-                               int probe_msg, u32 gap, u32 tolerance,
-                               u32 priority, u32 ack_mtu)
+                             int probe_msg, u32 gap, u32 tolerance,
+                             u32 priority, u32 ack_mtu)
 {
        struct sk_buff *buf = NULL;
        struct tipc_msg *msg = l_ptr->pmsg;
@@ -1988,6 +1952,7 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
                return;
 
        skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
+       buf->priority = TC_PRIO_CONTROL;
 
        /* Defer message if bearer is already blocked */
        if (tipc_bearer_blocked(l_ptr->b_ptr)) {
@@ -2145,8 +2110,7 @@ exit:
  * another bearer. Owner node is locked.
  */
 static void tipc_link_tunnel(struct tipc_link *l_ptr,
-                            struct tipc_msg *tunnel_hdr,
-                            struct tipc_msg  *msg,
+                            struct tipc_msg *tunnel_hdr, struct tipc_msg *msg,
                             u32 selector)
 {
        struct tipc_link *tunnel;
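The new #include <linux/pkt_sched.h> in link.c exists for the single line added to tipc_link_send_proto_msg(): outgoing link protocol (control) frames are now stamped with TC_PRIO_CONTROL so that queueing disciplines on the underlying device can prioritise them over ordinary data traffic. A minimal illustrative helper showing the same marking, not taken from the patch:

	#include <linux/skbuff.h>
	#include <linux/pkt_sched.h>

	/* Tag a control frame so qdiscs that honour skb->priority prefer it */
	static void example_mark_control_frame(struct sk_buff *skb)
	{
		skb->priority = TC_PRIO_CONTROL;
	}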
index f2db8a87d9c5a452d688d88f5df20a9b662dd1de..ced60e2fc4f7fbee929874594ee524c41835cd3e 100644 (file)
@@ -51,8 +51,8 @@ u32 tipc_msg_tot_importance(struct tipc_msg *m)
 }
 
 
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
-                           u32 hsize, u32 destnode)
+void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
+                  u32 destnode)
 {
        memset(m, 0, hsize);
        msg_set_version(m);
@@ -73,8 +73,8 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
  * Returns message data size or errno
  */
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  u32 num_sect, unsigned int total_len,
-                           int max_size, int usrmem, struct sk_buff **buf)
+                  u32 num_sect, unsigned int total_len, int max_size,
+                  struct sk_buff **buf)
 {
        int dsz, sz, hsz, pos, res, cnt;
 
@@ -92,14 +92,9 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
                return -ENOMEM;
        skb_copy_to_linear_data(*buf, hdr, hsz);
        for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
-               if (likely(usrmem))
-                       res = !copy_from_user((*buf)->data + pos,
-                                             msg_sect[cnt].iov_base,
-                                             msg_sect[cnt].iov_len);
-               else
-                       skb_copy_to_linear_data_offset(*buf, pos,
-                                                      msg_sect[cnt].iov_base,
-                                                      msg_sect[cnt].iov_len);
+               skb_copy_to_linear_data_offset(*buf, pos,
+                                              msg_sect[cnt].iov_base,
+                                              msg_sect[cnt].iov_len);
                pos += msg_sect[cnt].iov_len;
        }
        if (likely(res))
index ba2a72beea68dba6f3046ca0c69c9502008b1c3a..5e4ccf5c27df0361aaf26df8023ba79f20057936 100644 (file)
@@ -719,9 +719,9 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
 }
 
 u32 tipc_msg_tot_importance(struct tipc_msg *m);
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
-                           u32 hsize, u32 destnode);
+void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
+                  u32 destnode);
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  u32 num_sect, unsigned int total_len,
-                           int max_size, int usrmem, struct sk_buff **buf);
+                  u32 num_sect, unsigned int total_len, int max_size,
+                  struct sk_buff **buf);
 #endif
index 24b167914311fba8bdb25851232a1e604107865e..09dcd54b04e1d700347b217cc815709dec81e7c6 100644 (file)
@@ -440,7 +440,7 @@ found:
  * sequence overlapping with the requested sequence
  */
 static void tipc_nameseq_subscribe(struct name_seq *nseq,
-                                       struct tipc_subscription *s)
+                                  struct tipc_subscription *s)
 {
        struct sub_seq *sseq = nseq->sseqs;
 
@@ -662,7 +662,7 @@ exit:
  * tipc_nametbl_publish - add name publication to network name tables
  */
 struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
-                                   u32 scope, u32 port_ref, u32 key)
+                                        u32 scope, u32 port_ref, u32 key)
 {
        struct publication *publ;
 
@@ -753,7 +753,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
  * subseq_list - print specified sub-sequence contents into the given buffer
  */
 static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
-                       u32 index)
+                      u32 index)
 {
        char portIdStr[27];
        const char *scope_str[] = {"", " zone", " cluster", " node"};
@@ -792,7 +792,7 @@ static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
  * nameseq_list - print specified name sequence contents into the given buffer
  */
 static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
-                        u32 type, u32 lowbound, u32 upbound, u32 index)
+                       u32 type, u32 lowbound, u32 upbound, u32 index)
 {
        struct sub_seq *sseq;
        char typearea[11];
@@ -849,7 +849,7 @@ static int nametbl_header(char *buf, int len, u32 depth)
  * nametbl_list - print specified name table contents into the given buffer
  */
 static int nametbl_list(char *buf, int len, u32 depth_info,
-                        u32 type, u32 lowbound, u32 upbound)
+                       u32 type, u32 lowbound, u32 upbound)
 {
        struct hlist_head *seq_head;
        struct name_seq *seq;
index 71cb4dc712df8bc5898cff24eddcf64a10d9a049..f02f48b9a216e549a02a5518040ac7f55a085dbc 100644 (file)
@@ -87,14 +87,15 @@ extern rwlock_t tipc_nametbl_lock;
 struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
 u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                        struct tipc_port_list *dports);
+                             struct tipc_port_list *dports);
 struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
-                                   u32 scope, u32 port_ref, u32 key);
+                                        u32 scope, u32 port_ref, u32 key);
 int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
 struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
-                                       u32 scope, u32 node, u32 ref, u32 key);
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
-                                       u32 node, u32 ref, u32 key);
+                                            u32 scope, u32 node, u32 ref,
+                                            u32 key);
+struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node,
+                                            u32 ref, u32 key);
 void tipc_nametbl_subscribe(struct tipc_subscription *s);
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
 int tipc_nametbl_init(void);
index 5e34b015da457f96fff45b0dec423b7acf58d5bd..8a7384c04add4bdc6db6ae4451ebb2232e4b338b 100644 (file)
@@ -42,7 +42,7 @@
  * tipc_nodesub_subscribe - create "node down" subscription for specified node
  */
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
-                      void *usr_handle, net_ev_handler handle_down)
+                           void *usr_handle, net_ev_handler handle_down)
 {
        if (in_own_node(addr)) {
                node_sub->node = NULL;
index 18098cac62f23e942b6c3e42839bb3c2e4fdcea3..b3ed2fcab4fbd3a947b6419856641c9a7b315872 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/port.c: TIPC port code
  *
  * Copyright (c) 1992-2007, Ericsson AB
- * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #define MAX_REJECT_SIZE 1024
 
-static struct sk_buff *msg_queue_head;
-static struct sk_buff *msg_queue_tail;
-
 DEFINE_SPINLOCK(tipc_port_list_lock);
-static DEFINE_SPINLOCK(queue_lock);
 
 static LIST_HEAD(ports);
 static void port_handle_node_down(unsigned long ref);
@@ -119,7 +115,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
        msg_set_nameupper(hdr, seq->upper);
        msg_set_hdr_sz(hdr, MCAST_H_SIZE);
        res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-                       !oport->user_port, &buf);
+                            &buf);
        if (unlikely(!buf))
                return res;
 
@@ -206,14 +202,15 @@ exit:
 }
 
 /**
- * tipc_createport_raw - create a generic TIPC port
+ * tipc_createport - create a generic TIPC port
  *
  * Returns pointer to (locked) TIPC port, or NULL if unable to create it
  */
-struct tipc_port *tipc_createport_raw(void *usr_handle,
-                       u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
-                       void (*wakeup)(struct tipc_port *),
-                       const u32 importance)
+struct tipc_port *tipc_createport(struct sock *sk,
+                                 u32 (*dispatcher)(struct tipc_port *,
+                                 struct sk_buff *),
+                                 void (*wakeup)(struct tipc_port *),
+                                 const u32 importance)
 {
        struct tipc_port *p_ptr;
        struct tipc_msg *msg;
@@ -231,14 +228,13 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
                return NULL;
        }
 
-       p_ptr->usr_handle = usr_handle;
+       p_ptr->sk = sk;
        p_ptr->max_pkt = MAX_PKT_DEFAULT;
        p_ptr->ref = ref;
        INIT_LIST_HEAD(&p_ptr->wait_list);
        INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
        p_ptr->dispatcher = dispatcher;
        p_ptr->wakeup = wakeup;
-       p_ptr->user_port = NULL;
        k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
        INIT_LIST_HEAD(&p_ptr->publications);
        INIT_LIST_HEAD(&p_ptr->port_list);
@@ -275,7 +271,6 @@ int tipc_deleteport(u32 ref)
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
        }
-       kfree(p_ptr->user_port);
 
        spin_lock_bh(&tipc_port_list_lock);
        list_del(&p_ptr->port_list);
@@ -448,7 +443,7 @@ int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
        int res;
 
        res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-                       !p_ptr->user_port, &buf);
+                            &buf);
        if (!buf)
                return res;
 
@@ -668,215 +663,6 @@ void tipc_port_reinit(void)
        spin_unlock_bh(&tipc_port_list_lock);
 }
 
-
-/*
- *  port_dispatcher_sigh(): Signal handler for messages destinated
- *                          to the tipc_port interface.
- */
-static void port_dispatcher_sigh(void *dummy)
-{
-       struct sk_buff *buf;
-
-       spin_lock_bh(&queue_lock);
-       buf = msg_queue_head;
-       msg_queue_head = NULL;
-       spin_unlock_bh(&queue_lock);
-
-       while (buf) {
-               struct tipc_port *p_ptr;
-               struct user_port *up_ptr;
-               struct tipc_portid orig;
-               struct tipc_name_seq dseq;
-               void *usr_handle;
-               int connected;
-               int peer_invalid;
-               int published;
-               u32 message_type;
-
-               struct sk_buff *next = buf->next;
-               struct tipc_msg *msg = buf_msg(buf);
-               u32 dref = msg_destport(msg);
-
-               message_type = msg_type(msg);
-               if (message_type > TIPC_DIRECT_MSG)
-                       goto reject;    /* Unsupported message type */
-
-               p_ptr = tipc_port_lock(dref);
-               if (!p_ptr)
-                       goto reject;    /* Port deleted while msg in queue */
-
-               orig.ref = msg_origport(msg);
-               orig.node = msg_orignode(msg);
-               up_ptr = p_ptr->user_port;
-               usr_handle = up_ptr->usr_handle;
-               connected = p_ptr->connected;
-               peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
-               published = p_ptr->published;
-
-               if (unlikely(msg_errcode(msg)))
-                       goto err;
-
-               switch (message_type) {
-
-               case TIPC_CONN_MSG:{
-                               tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
-                               u32 dsz;
-
-                               tipc_port_unlock(p_ptr);
-                               if (unlikely(!cb))
-                                       goto reject;
-                               if (unlikely(!connected)) {
-                                       if (tipc_connect(dref, &orig))
-                                               goto reject;
-                               } else if (peer_invalid)
-                                       goto reject;
-                               dsz = msg_data_sz(msg);
-                               if (unlikely(dsz &&
-                                            (++p_ptr->conn_unacked >=
-                                             TIPC_FLOW_CONTROL_WIN)))
-                                       tipc_acknowledge(dref,
-                                                        p_ptr->conn_unacked);
-                               skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg), dsz);
-                               break;
-                       }
-               case TIPC_DIRECT_MSG:{
-                               tipc_msg_event cb = up_ptr->msg_cb;
-
-                               tipc_port_unlock(p_ptr);
-                               if (unlikely(!cb || connected))
-                                       goto reject;
-                               skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg), msg_importance(msg),
-                                  &orig);
-                               break;
-                       }
-               case TIPC_MCAST_MSG:
-               case TIPC_NAMED_MSG:{
-                               tipc_named_msg_event cb = up_ptr->named_msg_cb;
-
-                               tipc_port_unlock(p_ptr);
-                               if (unlikely(!cb || connected || !published))
-                                       goto reject;
-                               dseq.type =  msg_nametype(msg);
-                               dseq.lower = msg_nameinst(msg);
-                               dseq.upper = (message_type == TIPC_NAMED_MSG)
-                                       ? dseq.lower : msg_nameupper(msg);
-                               skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg), msg_importance(msg),
-                                  &orig, &dseq);
-                               break;
-                       }
-               }
-               if (buf)
-                       kfree_skb(buf);
-               buf = next;
-               continue;
-err:
-               switch (message_type) {
-
-               case TIPC_CONN_MSG:{
-                               tipc_conn_shutdown_event cb =
-                                       up_ptr->conn_err_cb;
-
-                               tipc_port_unlock(p_ptr);
-                               if (!cb || !connected || peer_invalid)
-                                       break;
-                               tipc_disconnect(dref);
-                               skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg), msg_errcode(msg));
-                               break;
-                       }
-               case TIPC_DIRECT_MSG:{
-                               tipc_msg_err_event cb = up_ptr->err_cb;
-
-                               tipc_port_unlock(p_ptr);
-                               if (!cb || connected)
-                                       break;
-                               skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg), msg_errcode(msg), &orig);
-                               break;
-                       }
-               case TIPC_MCAST_MSG:
-               case TIPC_NAMED_MSG:{
-                               tipc_named_msg_err_event cb =
-                                       up_ptr->named_err_cb;
-
-                               tipc_port_unlock(p_ptr);
-                               if (!cb || connected)
-                                       break;
-                               dseq.type =  msg_nametype(msg);
-                               dseq.lower = msg_nameinst(msg);
-                               dseq.upper = (message_type == TIPC_NAMED_MSG)
-                                       ? dseq.lower : msg_nameupper(msg);
-                               skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg), msg_errcode(msg), &dseq);
-                               break;
-                       }
-               }
-               if (buf)
-                       kfree_skb(buf);
-               buf = next;
-               continue;
-reject:
-               tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
-               buf = next;
-       }
-}
-
-/*
- *  port_dispatcher(): Dispatcher for messages destinated
- *  to the tipc_port interface. Called with port locked.
- */
-static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
-{
-       buf->next = NULL;
-       spin_lock_bh(&queue_lock);
-       if (msg_queue_head) {
-               msg_queue_tail->next = buf;
-               msg_queue_tail = buf;
-       } else {
-               msg_queue_tail = msg_queue_head = buf;
-               tipc_k_signal((Handler)port_dispatcher_sigh, 0);
-       }
-       spin_unlock_bh(&queue_lock);
-       return 0;
-}
-
-/*
- * Wake up port after congestion: Called with port locked
- */
-static void port_wakeup_sh(unsigned long ref)
-{
-       struct tipc_port *p_ptr;
-       struct user_port *up_ptr;
-       tipc_continue_event cb = NULL;
-       void *uh = NULL;
-
-       p_ptr = tipc_port_lock(ref);
-       if (p_ptr) {
-               up_ptr = p_ptr->user_port;
-               if (up_ptr) {
-                       cb = up_ptr->continue_event_cb;
-                       uh = up_ptr->usr_handle;
-               }
-               tipc_port_unlock(p_ptr);
-       }
-       if (cb)
-               cb(uh, ref);
-}
-
-
-static void port_wakeup(struct tipc_port *p_ptr)
-{
-       tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
-}
-
 void tipc_acknowledge(u32 ref, u32 ack)
 {
        struct tipc_port *p_ptr;
@@ -893,50 +679,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
        tipc_net_route_msg(buf);
 }
 
-/*
- * tipc_createport(): user level call.
- */
-int tipc_createport(void *usr_handle,
-                   unsigned int importance,
-                   tipc_msg_err_event error_cb,
-                   tipc_named_msg_err_event named_error_cb,
-                   tipc_conn_shutdown_event conn_error_cb,
-                   tipc_msg_event msg_cb,
-                   tipc_named_msg_event named_msg_cb,
-                   tipc_conn_msg_event conn_msg_cb,
-                   tipc_continue_event continue_event_cb, /* May be zero */
-                   u32 *portref)
-{
-       struct user_port *up_ptr;
-       struct tipc_port *p_ptr;
-
-       up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
-       if (!up_ptr) {
-               pr_warn("Port creation failed, no memory\n");
-               return -ENOMEM;
-       }
-       p_ptr = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
-                                   importance);
-       if (!p_ptr) {
-               kfree(up_ptr);
-               return -ENOMEM;
-       }
-
-       p_ptr->user_port = up_ptr;
-       up_ptr->usr_handle = usr_handle;
-       up_ptr->ref = p_ptr->ref;
-       up_ptr->err_cb = error_cb;
-       up_ptr->named_err_cb = named_error_cb;
-       up_ptr->conn_err_cb = conn_error_cb;
-       up_ptr->msg_cb = msg_cb;
-       up_ptr->named_msg_cb = named_msg_cb;
-       up_ptr->conn_msg_cb = conn_msg_cb;
-       up_ptr->continue_event_cb = continue_event_cb;
-       *portref = p_ptr->ref;
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
 int tipc_portimportance(u32 ref, unsigned int *importance)
 {
        struct tipc_port *p_ptr;
@@ -1184,7 +926,7 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
        int res;
 
        res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
-                       MAX_MSG_SIZE, !sender->user_port, &buf);
+                            MAX_MSG_SIZE, &buf);
        if (likely(buf))
                tipc_port_recv_msg(buf);
        return res;
@@ -1322,43 +1064,3 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
        }
        return -ELINKCONG;
 }
-
-/**
- * tipc_send_buf2port - send message buffer to port identity
- */
-int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
-              struct sk_buff *buf, unsigned int dsz)
-{
-       struct tipc_port *p_ptr;
-       struct tipc_msg *msg;
-       int res;
-
-       p_ptr = (struct tipc_port *)tipc_ref_deref(ref);
-       if (!p_ptr || p_ptr->connected)
-               return -EINVAL;
-
-       msg = &p_ptr->phdr;
-       msg_set_type(msg, TIPC_DIRECT_MSG);
-       msg_set_destnode(msg, dest->node);
-       msg_set_destport(msg, dest->ref);
-       msg_set_hdr_sz(msg, BASIC_H_SIZE);
-       msg_set_size(msg, BASIC_H_SIZE + dsz);
-       if (skb_cow(buf, BASIC_H_SIZE))
-               return -ENOMEM;
-
-       skb_push(buf, BASIC_H_SIZE);
-       skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
-
-       if (in_own_node(dest->node))
-               res = tipc_port_recv_msg(buf);
-       else
-               res = tipc_send_buf_fast(buf, dest->node);
-       if (likely(res != -ELINKCONG)) {
-               if (res > 0)
-                       p_ptr->sent++;
-               return res;
-       }
-       if (port_unreliable(p_ptr))
-               return dsz;
-       return -ELINKCONG;
-}
index fb66e2e5f4d1a93348c24e212946d74c776c0331..5a7026b9c3456b716dd8a173b345d11355f1b3be 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/port.h: Include file for TIPC port code
  *
  * Copyright (c) 1994-2007, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "node_subscr.h"
 
 #define TIPC_FLOW_CONTROL_WIN 512
-
-typedef void (*tipc_msg_err_event) (void *usr_handle, u32 portref,
-               struct sk_buff **buf, unsigned char const *data,
-               unsigned int size, int reason,
-               struct tipc_portid const *attmpt_destid);
-
-typedef void (*tipc_named_msg_err_event) (void *usr_handle, u32 portref,
-               struct sk_buff **buf, unsigned char const *data,
-               unsigned int size, int reason,
-               struct tipc_name_seq const *attmpt_dest);
-
-typedef void (*tipc_conn_shutdown_event) (void *usr_handle, u32 portref,
-               struct sk_buff **buf, unsigned char const *data,
-               unsigned int size, int reason);
-
-typedef void (*tipc_msg_event) (void *usr_handle, u32 portref,
-               struct sk_buff **buf, unsigned char const *data,
-               unsigned int size, unsigned int importance,
-               struct tipc_portid const *origin);
-
-typedef void (*tipc_named_msg_event) (void *usr_handle, u32 portref,
-               struct sk_buff **buf, unsigned char const *data,
-               unsigned int size, unsigned int importance,
-               struct tipc_portid const *orig,
-               struct tipc_name_seq const *dest);
-
-typedef void (*tipc_conn_msg_event) (void *usr_handle, u32 portref,
-               struct sk_buff **buf, unsigned char const *data,
-               unsigned int size);
-
-typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
-
-/**
- * struct user_port - TIPC user port (used with native API)
- * @usr_handle: user-specified field
- * @ref: object reference to associated TIPC port
- *
- * <various callback routines>
- */
-struct user_port {
-       void *usr_handle;
-       u32 ref;
-       tipc_msg_err_event err_cb;
-       tipc_named_msg_err_event named_err_cb;
-       tipc_conn_shutdown_event conn_err_cb;
-       tipc_msg_event msg_cb;
-       tipc_named_msg_event named_msg_cb;
-       tipc_conn_msg_event conn_msg_cb;
-       tipc_continue_event continue_event_cb;
-};
+#define CONN_OVERLOAD_LIMIT    ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
+                               SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 
 /**
  * struct tipc_port - TIPC port structure
- * @usr_handle: pointer to additional user-defined information about port
+ * @sk: pointer to socket handle
  * @lock: pointer to spinlock for controlling access to port
  * @connected: non-zero if port is currently connected to a peer port
  * @conn_type: TIPC type used when connection was established
@@ -110,7 +62,6 @@ struct user_port {
  * @port_list: adjacent ports in TIPC's global list of ports
  * @dispatcher: ptr to routine which handles received messages
  * @wakeup: ptr to routine to call when port is no longer congested
- * @user_port: ptr to user port associated with port (if any)
  * @wait_list: adjacent ports in list of ports waiting on link congestion
  * @waiting_pkts:
  * @sent: # of non-empty messages sent by port
@@ -123,7 +74,7 @@ struct user_port {
  * @subscription: "node down" subscription used to terminate failed connections
  */
 struct tipc_port {
-       void *usr_handle;
+       struct sock *sk;
        spinlock_t *lock;
        int connected;
        u32 conn_type;
@@ -137,7 +88,6 @@ struct tipc_port {
        struct list_head port_list;
        u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
        void (*wakeup)(struct tipc_port *);
-       struct user_port *user_port;
        struct list_head wait_list;
        u32 waiting_pkts;
        u32 sent;
@@ -156,24 +106,16 @@ struct tipc_port_list;
 /*
  * TIPC port manipulation routines
  */
-struct tipc_port *tipc_createport_raw(void *usr_handle,
-               u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
-               void (*wakeup)(struct tipc_port *), const u32 importance);
+struct tipc_port *tipc_createport(struct sock *sk,
+                                 u32 (*dispatcher)(struct tipc_port *,
+                                 struct sk_buff *),
+                                 void (*wakeup)(struct tipc_port *),
+                                 const u32 importance);
 
 int tipc_reject_msg(struct sk_buff *buf, u32 err);
 
-int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
-
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
-int tipc_createport(void *usr_handle,
-               unsigned int importance, tipc_msg_err_event error_cb,
-               tipc_named_msg_err_event named_error_cb,
-               tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb,
-               tipc_named_msg_event named_msg_cb,
-               tipc_conn_msg_event conn_msg_cb,
-               tipc_continue_event continue_event_cb, u32 *portref);
-
 int tipc_deleteport(u32 portref);
 
 int tipc_portimportance(u32 portref, unsigned int *importance);
@@ -186,9 +128,9 @@ int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
 int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
 
 int tipc_publish(u32 portref, unsigned int scope,
-               struct tipc_name_seq const *name_seq);
+                struct tipc_name_seq const *name_seq);
 int tipc_withdraw(u32 portref, unsigned int scope,
-               struct tipc_name_seq const *name_seq);
+                 struct tipc_name_seq const *name_seq);
 
 int tipc_connect(u32 portref, struct tipc_portid const *port);
 
@@ -220,9 +162,6 @@ int tipc_send2port(u32 portref, struct tipc_portid const *dest,
                   unsigned int num_sect, struct iovec const *msg_sect,
                   unsigned int total_len);
 
-int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
-               struct sk_buff *buf, unsigned int dsz);
-
 int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
                   unsigned int section_count, struct iovec const *msg,
                   unsigned int total_len);
diff --git a/net/tipc/server.c b/net/tipc/server.c
new file mode 100644 (file)
index 0000000..19da5ab
--- /dev/null
@@ -0,0 +1,596 @@
+/*
+ * net/tipc/server.c: TIPC server infrastructure
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "server.h"
+#include "core.h"
+#include <net/sock.h>
+
+/* Number of messages to send before rescheduling */
+#define MAX_SEND_MSG_COUNT     25
+#define MAX_RECV_MSG_COUNT     25
+#define CF_CONNECTED           1
+
+#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
+
+/**
+ * struct tipc_conn - TIPC connection structure
+ * @kref: reference counter to connection object
+ * @conid: connection identifier
+ * @sock: socket handler associated with connection
+ * @flags: indicates connection state
+ * @server: pointer to connected server
+ * @rwork: receive work item
+ * @usr_data: user-specified field
+ * @rx_action: what to do when connection socket is active
+ * @outqueue: pointer to first outbound message in queue
+ * @outqueue_lock: control access to the outqueue
+ * @outqueue: list of connection objects for its server
+ * @swork: send work item
+ */
+struct tipc_conn {
+       struct kref kref;
+       int conid;
+       struct socket *sock;
+       unsigned long flags;
+       struct tipc_server *server;
+       struct work_struct rwork;
+       int (*rx_action) (struct tipc_conn *con);
+       void *usr_data;
+       struct list_head outqueue;
+       spinlock_t outqueue_lock;
+       struct work_struct swork;
+};
+
+/* An entry waiting to be sent */
+struct outqueue_entry {
+       struct list_head list;
+       struct kvec iov;
+       struct sockaddr_tipc dest;
+};
+
+static void tipc_recv_work(struct work_struct *work);
+static void tipc_send_work(struct work_struct *work);
+static void tipc_clean_outqueues(struct tipc_conn *con);
+
+static void tipc_conn_kref_release(struct kref *kref)
+{
+       struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+       struct tipc_server *s = con->server;
+
+       if (con->sock) {
+               tipc_sock_release_local(con->sock);
+               con->sock = NULL;
+       }
+
+       tipc_clean_outqueues(con);
+
+       if (con->conid)
+               s->tipc_conn_shutdown(con->conid, con->usr_data);
+
+       kfree(con);
+}
+
+static void conn_put(struct tipc_conn *con)
+{
+       kref_put(&con->kref, tipc_conn_kref_release);
+}
+
+static void conn_get(struct tipc_conn *con)
+{
+       kref_get(&con->kref);
+}
+
+static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
+{
+       struct tipc_conn *con;
+
+       spin_lock_bh(&s->idr_lock);
+       con = idr_find(&s->conn_idr, conid);
+       if (con)
+               conn_get(con);
+       spin_unlock_bh(&s->idr_lock);
+       return con;
+}
+
+static void sock_data_ready(struct sock *sk, int unused)
+{
+       struct tipc_conn *con;
+
+       read_lock(&sk->sk_callback_lock);
+       con = sock2con(sk);
+       if (con && test_bit(CF_CONNECTED, &con->flags)) {
+               conn_get(con);
+               if (!queue_work(con->server->rcv_wq, &con->rwork))
+                       conn_put(con);
+       }
+       read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_write_space(struct sock *sk)
+{
+       struct tipc_conn *con;
+
+       read_lock(&sk->sk_callback_lock);
+       con = sock2con(sk);
+       if (con && test_bit(CF_CONNECTED, &con->flags)) {
+               conn_get(con);
+               if (!queue_work(con->server->send_wq, &con->swork))
+                       conn_put(con);
+       }
+       read_unlock(&sk->sk_callback_lock);
+}
+
+static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
+{
+       struct sock *sk = sock->sk;
+
+       write_lock_bh(&sk->sk_callback_lock);
+
+       sk->sk_data_ready = sock_data_ready;
+       sk->sk_write_space = sock_write_space;
+       sk->sk_user_data = con;
+
+       con->sock = sock;
+
+       write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void tipc_unregister_callbacks(struct tipc_conn *con)
+{
+       struct sock *sk = con->sock->sk;
+
+       write_lock_bh(&sk->sk_callback_lock);
+       sk->sk_user_data = NULL;
+       write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void tipc_close_conn(struct tipc_conn *con)
+{
+       struct tipc_server *s = con->server;
+
+       if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+               spin_lock_bh(&s->idr_lock);
+               idr_remove(&s->conn_idr, con->conid);
+               s->idr_in_use--;
+               spin_unlock_bh(&s->idr_lock);
+
+               tipc_unregister_callbacks(con);
+
+               /* We shouldn't flush pending works as we may be in the
+                * thread. In fact the races with pending rx/tx work structs
+                * are harmless for us here as we have already deleted this
+                * connection from server connection list and set
+                * sk->sk_user_data to 0 before releasing connection object.
+                */
+               kernel_sock_shutdown(con->sock, SHUT_RDWR);
+
+               conn_put(con);
+       }
+}
+
+static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
+{
+       struct tipc_conn *con;
+       int ret;
+
+       con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
+       if (!con)
+               return ERR_PTR(-ENOMEM);
+
+       kref_init(&con->kref);
+       INIT_LIST_HEAD(&con->outqueue);
+       spin_lock_init(&con->outqueue_lock);
+       INIT_WORK(&con->swork, tipc_send_work);
+       INIT_WORK(&con->rwork, tipc_recv_work);
+
+       spin_lock_bh(&s->idr_lock);
+       ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
+       if (ret < 0) {
+               kfree(con);
+               spin_unlock_bh(&s->idr_lock);
+               return ERR_PTR(-ENOMEM);
+       }
+       con->conid = ret;
+       s->idr_in_use++;
+       spin_unlock_bh(&s->idr_lock);
+
+       set_bit(CF_CONNECTED, &con->flags);
+       con->server = s;
+
+       return con;
+}
+
+static int tipc_receive_from_sock(struct tipc_conn *con)
+{
+       struct msghdr msg = {};
+       struct tipc_server *s = con->server;
+       struct sockaddr_tipc addr;
+       struct kvec iov;
+       void *buf;
+       int ret;
+
+       buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto out_close;
+       }
+
+       iov.iov_base = buf;
+       iov.iov_len = s->max_rcvbuf_size;
+       msg.msg_name = &addr;
+       ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
+                            MSG_DONTWAIT);
+       if (ret <= 0) {
+               kmem_cache_free(s->rcvbuf_cache, buf);
+               goto out_close;
+       }
+
+       s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret);
+
+       kmem_cache_free(s->rcvbuf_cache, buf);
+
+       return 0;
+
+out_close:
+       if (ret != -EWOULDBLOCK)
+               tipc_close_conn(con);
+       else if (ret == 0)
+               /* Don't return success if we really got EOF */
+               ret = -EAGAIN;
+
+       return ret;
+}
+
+static int tipc_accept_from_sock(struct tipc_conn *con)
+{
+       struct tipc_server *s = con->server;
+       struct socket *sock = con->sock;
+       struct socket *newsock;
+       struct tipc_conn *newcon;
+       int ret;
+
+       ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK);
+       if (ret < 0)
+               return ret;
+
+       newcon = tipc_alloc_conn(con->server);
+       if (IS_ERR(newcon)) {
+               ret = PTR_ERR(newcon);
+               sock_release(newsock);
+               return ret;
+       }
+
+       newcon->rx_action = tipc_receive_from_sock;
+       tipc_register_callbacks(newsock, newcon);
+
+       /* Notify that new connection is incoming */
+       newcon->usr_data = s->tipc_conn_new(newcon->conid);
+
+       /* Wake up receive process in case of 'SYN+' message */
+       newsock->sk->sk_data_ready(newsock->sk, 0);
+       return ret;
+}
+
+static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
+{
+       struct tipc_server *s = con->server;
+       struct socket *sock = NULL;
+       int ret;
+
+       ret = tipc_sock_create_local(s->type, &sock);
+       if (ret < 0)
+               return NULL;
+       ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
+                               (char *)&s->imp, sizeof(s->imp));
+       if (ret < 0)
+               goto create_err;
+       ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
+       if (ret < 0)
+               goto create_err;
+
+       switch (s->type) {
+       case SOCK_STREAM:
+       case SOCK_SEQPACKET:
+               con->rx_action = tipc_accept_from_sock;
+
+               ret = kernel_listen(sock, 0);
+               if (ret < 0)
+                       goto create_err;
+               break;
+       case SOCK_DGRAM:
+       case SOCK_RDM:
+               con->rx_action = tipc_receive_from_sock;
+               break;
+       default:
+               pr_err("Unknown socket type %d\n", s->type);
+               goto create_err;
+       }
+       return sock;
+
+create_err:
+       sock_release(sock);
+       con->sock = NULL;
+       return NULL;
+}
+
+static int tipc_open_listening_sock(struct tipc_server *s)
+{
+       struct socket *sock;
+       struct tipc_conn *con;
+
+       con = tipc_alloc_conn(s);
+       if (IS_ERR(con))
+               return PTR_ERR(con);
+
+       sock = tipc_create_listen_sock(con);
+       if (!sock)
+               return -EINVAL;
+
+       tipc_register_callbacks(sock, con);
+       return 0;
+}
+
+static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
+{
+       struct outqueue_entry *entry;
+       void *buf;
+
+       entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
+       if (!entry)
+               return NULL;
+
+       buf = kmalloc(len, GFP_ATOMIC);
+       if (!buf) {
+               kfree(entry);
+               return NULL;
+       }
+
+       memcpy(buf, data, len);
+       entry->iov.iov_base = buf;
+       entry->iov.iov_len = len;
+
+       return entry;
+}
+
+static void tipc_free_entry(struct outqueue_entry *e)
+{
+       kfree(e->iov.iov_base);
+       kfree(e);
+}
+
+static void tipc_clean_outqueues(struct tipc_conn *con)
+{
+       struct outqueue_entry *e, *safe;
+
+       spin_lock_bh(&con->outqueue_lock);
+       list_for_each_entry_safe(e, safe, &con->outqueue, list) {
+               list_del(&e->list);
+               tipc_free_entry(e);
+       }
+       spin_unlock_bh(&con->outqueue_lock);
+}
+
+int tipc_conn_sendmsg(struct tipc_server *s, int conid,
+                     struct sockaddr_tipc *addr, void *data, size_t len)
+{
+       struct outqueue_entry *e;
+       struct tipc_conn *con;
+
+       con = tipc_conn_lookup(s, conid);
+       if (!con)
+               return -EINVAL;
+
+       e = tipc_alloc_entry(data, len);
+       if (!e) {
+               conn_put(con);
+               return -ENOMEM;
+       }
+
+       if (addr)
+               memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));
+
+       spin_lock_bh(&con->outqueue_lock);
+       list_add_tail(&e->list, &con->outqueue);
+       spin_unlock_bh(&con->outqueue_lock);
+
+       if (test_bit(CF_CONNECTED, &con->flags))
+               if (!queue_work(s->send_wq, &con->swork))
+                       conn_put(con);
+
+       return 0;
+}
+
+void tipc_conn_terminate(struct tipc_server *s, int conid)
+{
+       struct tipc_conn *con;
+
+       con = tipc_conn_lookup(s, conid);
+       if (con) {
+               tipc_close_conn(con);
+               conn_put(con);
+       }
+}
+
+static void tipc_send_to_sock(struct tipc_conn *con)
+{
+       int count = 0;
+       struct tipc_server *s = con->server;
+       struct outqueue_entry *e;
+       struct msghdr msg;
+       int ret;
+
+       spin_lock_bh(&con->outqueue_lock);
+       while (1) {
+               e = list_entry(con->outqueue.next, struct outqueue_entry,
+                              list);
+               if ((struct list_head *) e == &con->outqueue)
+                       break;
+               spin_unlock_bh(&con->outqueue_lock);
+
+               memset(&msg, 0, sizeof(msg));
+               msg.msg_flags = MSG_DONTWAIT;
+
+               if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
+                       msg.msg_name = &e->dest;
+                       msg.msg_namelen = sizeof(struct sockaddr_tipc);
+               }
+               ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
+                                    e->iov.iov_len);
+               if (ret == -EWOULDBLOCK || ret == 0) {
+                       cond_resched();
+                       goto out;
+               } else if (ret < 0) {
+                       goto send_err;
+               }
+
+               /* Don't starve users filling buffers */
+               if (++count >= MAX_SEND_MSG_COUNT) {
+                       cond_resched();
+                       count = 0;
+               }
+
+               spin_lock_bh(&con->outqueue_lock);
+               list_del(&e->list);
+               tipc_free_entry(e);
+       }
+       spin_unlock_bh(&con->outqueue_lock);
+out:
+       return;
+
+send_err:
+       tipc_close_conn(con);
+}
+
+static void tipc_recv_work(struct work_struct *work)
+{
+       struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
+       int count = 0;
+
+       while (test_bit(CF_CONNECTED, &con->flags)) {
+               if (con->rx_action(con))
+                       break;
+
+               /* Don't flood Rx machine */
+               if (++count >= MAX_RECV_MSG_COUNT) {
+                       cond_resched();
+                       count = 0;
+               }
+       }
+       conn_put(con);
+}
+
+static void tipc_send_work(struct work_struct *work)
+{
+       struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
+
+       if (test_bit(CF_CONNECTED, &con->flags))
+               tipc_send_to_sock(con);
+
+       conn_put(con);
+}
+
+static void tipc_work_stop(struct tipc_server *s)
+{
+       destroy_workqueue(s->rcv_wq);
+       destroy_workqueue(s->send_wq);
+}
+
+static int tipc_work_start(struct tipc_server *s)
+{
+       s->rcv_wq = alloc_workqueue("tipc_rcv", WQ_UNBOUND, 1);
+       if (!s->rcv_wq) {
+               pr_err("can't start tipc receive workqueue\n");
+               return -ENOMEM;
+       }
+
+       s->send_wq = alloc_workqueue("tipc_send", WQ_UNBOUND, 1);
+       if (!s->send_wq) {
+               pr_err("can't start tipc send workqueue\n");
+               destroy_workqueue(s->rcv_wq);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+int tipc_server_start(struct tipc_server *s)
+{
+       int ret;
+
+       spin_lock_init(&s->idr_lock);
+       idr_init(&s->conn_idr);
+       s->idr_in_use = 0;
+
+       s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
+                                           0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!s->rcvbuf_cache)
+               return -ENOMEM;
+
+       ret = tipc_work_start(s);
+       if (ret < 0) {
+               kmem_cache_destroy(s->rcvbuf_cache);
+               return ret;
+       }
+       s->enabled = 1;
+
+       return tipc_open_listening_sock(s);
+}
+
+void tipc_server_stop(struct tipc_server *s)
+{
+       struct tipc_conn *con;
+       int total = 0;
+       int id;
+
+       if (!s->enabled)
+               return;
+
+       s->enabled = 0;
+       spin_lock_bh(&s->idr_lock);
+       for (id = 0; total < s->idr_in_use; id++) {
+               con = idr_find(&s->conn_idr, id);
+               if (con) {
+                       total++;
+                       spin_unlock_bh(&s->idr_lock);
+                       tipc_close_conn(con);
+                       spin_lock_bh(&s->idr_lock);
+               }
+       }
+       spin_unlock_bh(&s->idr_lock);
+
+       tipc_work_stop(s);
+       kmem_cache_destroy(s->rcvbuf_cache);
+       idr_destroy(&s->conn_idr);
+}
diff --git a/net/tipc/server.h b/net/tipc/server.h
new file mode 100644 (file)
index 0000000..98b23f2
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * net/tipc/server.h: Include file for TIPC server code
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SERVER_H
+#define _TIPC_SERVER_H
+
+#include "core.h"
+
+#define TIPC_SERVER_NAME_LEN   32
+
+/**
+ * struct tipc_server - TIPC server structure
+ * @conn_idr: identifier set of connections
+ * @idr_lock: protects the connection identifier set
+ * @idr_in_use: number of allocated identifier entries
+ * @rcvbuf_cache: memory cache for server receive buffers
+ * @rcv_wq: receive workqueue
+ * @send_wq: send workqueue
+ * @max_rcvbuf_size: maximum permitted receive message length
+ * @tipc_conn_new: callback invoked when a new connection arrives
+ * @tipc_conn_shutdown: callback invoked when a connection is shut down
+ * @tipc_conn_recvmsg: callback invoked when a message arrives
+ * @saddr: TIPC server address
+ * @name: server name
+ * @imp: message importance
+ * @type: socket type
+ * @enabled: indicates whether the server has been started
+ */
+struct tipc_server {
+       struct idr conn_idr;
+       spinlock_t idr_lock;
+       int idr_in_use;
+       struct kmem_cache *rcvbuf_cache;
+       struct workqueue_struct *rcv_wq;
+       struct workqueue_struct *send_wq;
+       int max_rcvbuf_size;
+       void *(*tipc_conn_new) (int conid);
+       void (*tipc_conn_shutdown) (int conid, void *usr_data);
+       void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr,
+                                  void *usr_data, void *buf, size_t len);
+       struct sockaddr_tipc *saddr;
+       const char name[TIPC_SERVER_NAME_LEN];
+       int imp;
+       int type;
+       int enabled;
+};
+
+int tipc_conn_sendmsg(struct tipc_server *s, int conid,
+                     struct sockaddr_tipc *addr, void *data, size_t len);
+
+/**
+ * tipc_conn_terminate - terminate connection with server
+ *
+ * Note: Must be called from process context since it might sleep
+ */
+void tipc_conn_terminate(struct tipc_server *s, int conid);
+
+int tipc_server_start(struct tipc_server *s);
+
+void tipc_server_stop(struct tipc_server *s);
+
+#endif
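
For orientation, here is a minimal sketch of how another TIPC subsystem might use the server API declared above (all my_* identifiers and the service type 4711 are illustrative placeholders, not part of the patch): fill in a struct tipc_server with a bound name sequence, a socket type and the three callbacks, start it once at init time, and answer peers from the receive callback with tipc_conn_sendmsg().

/* Hypothetical user of the tipc_server API -- sketch only. */
#include "server.h"

static struct tipc_server my_srv;	/* tentative definition so the callback below can reference it */

static void *my_conn_new(int conid)
{
	return NULL;	/* per-connection user data, passed back to us later */
}

static void my_conn_shutdown(int conid, void *usr_data)
{
	/* release whatever my_conn_new() allocated */
}

static void my_conn_recvmsg(int conid, struct sockaddr_tipc *addr,
			    void *usr_data, void *buf, size_t len)
{
	/* echo the request back on the same connection */
	tipc_conn_sendmsg(&my_srv, conid, NULL, buf, len);
}

static struct sockaddr_tipc my_addr = {
	.family			= AF_TIPC,
	.addrtype		= TIPC_ADDR_NAMESEQ,
	.addr.nameseq.type	= 4711,		/* invented service type */
	.addr.nameseq.lower	= 4711,
	.addr.nameseq.upper	= 4711,
	.scope			= TIPC_NODE_SCOPE,
};

static struct tipc_server my_srv = {
	.saddr			= &my_addr,
	.imp			= TIPC_HIGH_IMPORTANCE,
	.type			= SOCK_SEQPACKET,
	.max_rcvbuf_size	= 1024,
	.name			= "example_server",
	.tipc_conn_new		= my_conn_new,
	.tipc_conn_shutdown	= my_conn_shutdown,
	.tipc_conn_recvmsg	= my_conn_recvmsg,
};

/* At subsystem init/exit: tipc_server_start(&my_srv) / tipc_server_stop(&my_srv). */
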
index 515ce38e4f4c7286023f32f4c8c692af2787ec80..ce8249c768271b9f778df694261d09a6845c5f05 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, 2012 Ericsson AB
- * Copyright (c) 2004-2008, 2010-2012, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,8 +43,6 @@
 #define SS_LISTENING   -1      /* socket is listening */
 #define SS_READY       -2      /* socket is connectionless */
 
-#define CONN_OVERLOAD_LIMIT    ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
-                               SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
 
 struct tipc_sock {
@@ -65,12 +63,15 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
 static void tipc_data_ready(struct sock *sk, int len);
 static void tipc_write_space(struct sock *sk);
+static int release(struct socket *sock);
+static int accept(struct socket *sock, struct socket *new_sock, int flags);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
 static const struct proto_ops msg_ops;
 
 static struct proto tipc_proto;
+static struct proto tipc_proto_kern;
 
 static int sockets_enabled;
 
@@ -143,7 +144,7 @@ static void reject_rx_queue(struct sock *sk)
 }
 
 /**
- * tipc_create - create a TIPC socket
+ * tipc_sk_create - create a TIPC socket
  * @net: network namespace (must be default network)
  * @sock: pre-allocated socket structure
  * @protocol: protocol indicator (must be 0)
@@ -154,8 +155,8 @@ static void reject_rx_queue(struct sock *sk)
  *
  * Returns 0 on success, errno otherwise
  */
-static int tipc_create(struct net *net, struct socket *sock, int protocol,
-                      int kern)
+static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
+                         int kern)
 {
        const struct proto_ops *ops;
        socket_state state;
@@ -185,13 +186,17 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
        }
 
        /* Allocate socket's protocol area */
-       sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
+       if (!kern)
+               sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
+       else
+               sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
+
        if (sk == NULL)
                return -ENOMEM;
 
        /* Allocate TIPC port for socket to use */
-       tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
-                                    TIPC_LOW_IMPORTANCE);
+       tp_ptr = tipc_createport(sk, &dispatch, &wakeupdispatch,
+                                TIPC_LOW_IMPORTANCE);
        if (unlikely(!tp_ptr)) {
                sk_free(sk);
                return -ENOMEM;
@@ -203,6 +208,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 
        sock_init_data(sock, sk);
        sk->sk_backlog_rcv = backlog_rcv;
+       sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        tipc_sk(sk)->p = tp_ptr;
@@ -219,6 +225,78 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
        return 0;
 }
 
+/**
+ * tipc_sock_create_local - create TIPC socket from inside TIPC module
+ * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
+ *
+ * We cannot use sock_create_kern() here because it bumps the module user
+ * count. Since the socket's owner and creator is the same module, we must
+ * make sure the module count remains zero for module-local sockets;
+ * otherwise we cannot do rmmod.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+int tipc_sock_create_local(int type, struct socket **res)
+{
+       int rc;
+       struct sock *sk;
+
+       rc = sock_create_lite(AF_TIPC, type, 0, res);
+       if (rc < 0) {
+               pr_err("Failed to create kernel socket\n");
+               return rc;
+       }
+       tipc_sk_create(&init_net, *res, 0, 1);
+
+       sk = (*res)->sk;
+
+       return 0;
+}
+
+/**
+ * tipc_sock_release_local - release socket created by tipc_sock_create_local
+ * @sock: the socket to be released.
+ *
+ * Module reference count is not incremented when such sockets are created,
+ * so we must keep it from being decremented when they are released.
+ */
+void tipc_sock_release_local(struct socket *sock)
+{
+       release(sock);
+       sock->ops = NULL;
+       sock_release(sock);
+}
+
+/**
+ * tipc_sock_accept_local - accept a connection on a socket created
+ * with tipc_sock_create_local. Use this function to avoid having the
+ * module reference count inadvertently incremented.
+ *
+ * @sock:    the accepting socket
+ * @newsock: reference to the new socket to be created
+ * @flags:   socket flags
+ */
+
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+                          int flags)
+{
+       struct sock *sk = sock->sk;
+       int ret;
+
+       ret = sock_create_lite(sk->sk_family, sk->sk_type,
+                              sk->sk_protocol, newsock);
+       if (ret < 0)
+               return ret;
+
+       ret = accept(sock, *newsock, flags);
+       if (ret < 0) {
+               sock_release(*newsock);
+               return ret;
+       }
+       (*newsock)->ops = sock->ops;
+       return ret;
+}
+
 /**
  * release - destroy a TIPC socket
  * @sock: socket to destroy
@@ -324,7 +402,9 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
        else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
                return -EAFNOSUPPORT;
 
-       if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
+       if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
+           (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
+           (addr->addr.nameseq.type != TIPC_CFG_SRV))
                return -EACCES;
 
        return (addr->scope > 0) ?
@@ -519,8 +599,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                        res = -EISCONN;
                        goto exit;
                }
-               if ((tport->published) ||
-                   ((sock->type == SOCK_STREAM) && (total_len != 0))) {
+               if (tport->published) {
                        res = -EOPNOTSUPP;
                        goto exit;
                }
@@ -810,7 +889,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
  * Returns 0 if successful, otherwise errno
  */
 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
-                               struct tipc_port *tport)
+                        struct tipc_port *tport)
 {
        u32 anc_data[3];
        u32 err;
@@ -1011,8 +1090,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 
        lock_sock(sk);
 
-       if (unlikely((sock->state == SS_UNCONNECTED) ||
-                    (sock->state == SS_CONNECTING))) {
+       if (unlikely((sock->state == SS_UNCONNECTED))) {
                res = -ENOTCONN;
                goto exit;
        }
@@ -1233,10 +1311,10 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
  * For all connectionless messages, by default new queue limits are
  * as follows:
  *
- * TIPC_LOW_IMPORTANCE       (5MB)
- * TIPC_MEDIUM_IMPORTANCE    (10MB)
- * TIPC_HIGH_IMPORTANCE      (20MB)
- * TIPC_CRITICAL_IMPORTANCE  (40MB)
+ * TIPC_LOW_IMPORTANCE       (4 MB)
+ * TIPC_MEDIUM_IMPORTANCE    (8 MB)
+ * TIPC_HIGH_IMPORTANCE      (16 MB)
+ * TIPC_CRITICAL_IMPORTANCE  (32 MB)
  *
  * Returns overload limit according to corresponding message importance
  */
@@ -1246,9 +1324,10 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
        unsigned int limit;
 
        if (msg_connected(msg))
-               limit = CONN_OVERLOAD_LIMIT;
+               limit = sysctl_tipc_rmem[2];
        else
-               limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
+               limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
+                       msg_importance(msg);
        return limit;
 }
 
@@ -1327,7 +1406,7 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
  */
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 {
-       struct sock *sk = (struct sock *)tport->usr_handle;
+       struct sock *sk = tport->sk;
        u32 res;
 
        /*
@@ -1358,7 +1437,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
  */
 static void wakeupdispatch(struct tipc_port *tport)
 {
-       struct sock *sk = (struct sock *)tport->usr_handle;
+       struct sock *sk = tport->sk;
 
        sk->sk_write_space(sk);
 }
@@ -1531,7 +1610,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
 
        buf = skb_peek(&sk->sk_receive_queue);
 
-       res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
+       res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
        if (res)
                goto exit;
 
@@ -1657,8 +1736,8 @@ restart:
  *
  * Returns 0 on success, errno otherwise
  */
-static int setsockopt(struct socket *sock,
-                     int lvl, int opt, char __user *ov, unsigned int ol)
+static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
+                     unsigned int ol)
 {
        struct sock *sk = sock->sk;
        struct tipc_port *tport = tipc_sk_port(sk);
@@ -1716,8 +1795,8 @@ static int setsockopt(struct socket *sock,
  *
  * Returns 0 on success, errno otherwise
  */
-static int getsockopt(struct socket *sock,
-                     int lvl, int opt, char __user *ov, int __user *ol)
+static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
+                     int __user *ol)
 {
        struct sock *sk = sock->sk;
        struct tipc_port *tport = tipc_sk_port(sk);
@@ -1841,13 +1920,20 @@ static const struct proto_ops stream_ops = {
 static const struct net_proto_family tipc_family_ops = {
        .owner          = THIS_MODULE,
        .family         = AF_TIPC,
-       .create         = tipc_create
+       .create         = tipc_sk_create
 };
 
 static struct proto tipc_proto = {
        .name           = "TIPC",
        .owner          = THIS_MODULE,
-       .obj_size       = sizeof(struct tipc_sock)
+       .obj_size       = sizeof(struct tipc_sock),
+       .sysctl_rmem    = sysctl_tipc_rmem
+};
+
+static struct proto tipc_proto_kern = {
+       .name           = "TIPC",
+       .obj_size       = sizeof(struct tipc_sock),
+       .sysctl_rmem    = sysctl_tipc_rmem
 };
 
 /**
index 6b42d47029af4f2d959a593578fc68bd174ebce6..d38bb45d82e99e36e8f5955911942413e26ddead 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/subscr.c: TIPC network topology service
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 /**
  * struct tipc_subscriber - TIPC network topology subscriber
- * @port_ref: object reference to server port connecting to subscriber
- * @lock: pointer to spinlock controlling access to subscriber's server port
- * @subscriber_list: adjacent subscribers in top. server's list of subscribers
+ * @conid: connection identifier of the server connection to this subscriber
+ * @lock: controls access to the subscriber
  * @subscription_list: list of subscription objects for this subscriber
  */
 struct tipc_subscriber {
-       u32 port_ref;
-       spinlock_t *lock;
-       struct list_head subscriber_list;
+       int conid;
+       spinlock_t lock;
        struct list_head subscription_list;
 };
 
-/**
- * struct top_srv - TIPC network topology subscription service
- * @setup_port: reference to TIPC port that handles subscription requests
- * @subscription_count: number of active subscriptions (not subscribers!)
- * @subscriber_list: list of ports subscribing to service
- * @lock: spinlock govering access to subscriber list
- */
-struct top_srv {
-       u32 setup_port;
-       atomic_t subscription_count;
-       struct list_head subscriber_list;
-       spinlock_t lock;
+static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+                                 void *usr_data, void *buf, size_t len);
+static void *subscr_named_msg_event(int conid);
+static void subscr_conn_shutdown_event(int conid, void *usr_data);
+
+static atomic_t subscription_count = ATOMIC_INIT(0);
+
+static struct sockaddr_tipc topsrv_addr __read_mostly = {
+       .family                 = AF_TIPC,
+       .addrtype               = TIPC_ADDR_NAMESEQ,
+       .addr.nameseq.type      = TIPC_TOP_SRV,
+       .addr.nameseq.lower     = TIPC_TOP_SRV,
+       .addr.nameseq.upper     = TIPC_TOP_SRV,
+       .scope                  = TIPC_NODE_SCOPE
 };
 
-static struct top_srv topsrv;
+static struct tipc_server topsrv __read_mostly = {
+       .saddr                  = &topsrv_addr,
+       .imp                    = TIPC_CRITICAL_IMPORTANCE,
+       .type                   = SOCK_SEQPACKET,
+       .max_rcvbuf_size        = sizeof(struct tipc_subscr),
+       .name                   = "topology_server",
+       .tipc_conn_recvmsg      = subscr_conn_msg_event,
+       .tipc_conn_new          = subscr_named_msg_event,
+       .tipc_conn_shutdown     = subscr_conn_shutdown_event,
+};
 
 /**
  * htohl - convert value to endianness used by destination
@@ -81,20 +90,13 @@ static u32 htohl(u32 in, int swap)
        return swap ? swab32(in) : in;
 }
 
-/**
- * subscr_send_event - send a message containing a tipc_event to the subscriber
- *
- * Note: Must not hold subscriber's server port lock, since tipc_send() will
- *       try to take the lock if the message is rejected and returned!
- */
-static void subscr_send_event(struct tipc_subscription *sub,
-                             u32 found_lower,
-                             u32 found_upper,
-                             u32 event,
-                             u32 port_ref,
+static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
+                             u32 found_upper, u32 event, u32 port_ref,
                              u32 node)
 {
-       struct iovec msg_sect;
+       struct tipc_subscriber *subscriber = sub->subscriber;
+       struct kvec msg_sect;
+       int ret;
 
        msg_sect.iov_base = (void *)&sub->evt;
        msg_sect.iov_len = sizeof(struct tipc_event);
@@ -104,7 +106,10 @@ static void subscr_send_event(struct tipc_subscription *sub,
        sub->evt.found_upper = htohl(found_upper, sub->swap);
        sub->evt.port.ref = htohl(port_ref, sub->swap);
        sub->evt.port.node = htohl(node, sub->swap);
-       tipc_send(sub->server_ref, 1, &msg_sect, msg_sect.iov_len);
+       ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
+                               msg_sect.iov_base, msg_sect.iov_len);
+       if (ret < 0)
+               pr_err("Sending subscription event failed, no memory\n");
 }
 
 /**
@@ -112,10 +117,8 @@ static void subscr_send_event(struct tipc_subscription *sub,
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
-int tipc_subscr_overlap(struct tipc_subscription *sub,
-                       u32 found_lower,
+int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
                        u32 found_upper)
-
 {
        if (found_lower < sub->seq.lower)
                found_lower = sub->seq.lower;
@@ -131,13 +134,9 @@ int tipc_subscr_overlap(struct tipc_subscription *sub,
  *
  * Protected by nameseq.lock in name_table.c
  */
-void tipc_subscr_report_overlap(struct tipc_subscription *sub,
-                               u32 found_lower,
-                               u32 found_upper,
-                               u32 event,
-                               u32 port_ref,
-                               u32 node,
-                               int must)
+void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+                               u32 found_upper, u32 event, u32 port_ref,
+                               u32 node, int must)
 {
        if (!tipc_subscr_overlap(sub, found_lower, found_upper))
                return;
@@ -147,21 +146,24 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub,
        subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
 }
 
-/**
- * subscr_timeout - subscription timeout has occurred
- */
 static void subscr_timeout(struct tipc_subscription *sub)
 {
-       struct tipc_port *server_port;
+       struct tipc_subscriber *subscriber = sub->subscriber;
+
+       /* The spin lock per subscriber is used to protect its members */
+       spin_lock_bh(&subscriber->lock);
 
-       /* Validate server port reference (in case subscriber is terminating) */
-       server_port = tipc_port_lock(sub->server_ref);
-       if (server_port == NULL)
+       /* Validate if the connection related to the subscriber is
+        * closed (in case subscriber is terminating)
+        */
+       if (subscriber->conid == 0) {
+               spin_unlock_bh(&subscriber->lock);
                return;
+       }
 
        /* Validate timeout (in case subscription is being cancelled) */
        if (sub->timeout == TIPC_WAIT_FOREVER) {
-               tipc_port_unlock(server_port);
+               spin_unlock_bh(&subscriber->lock);
                return;
        }
 
@@ -171,8 +173,7 @@ static void subscr_timeout(struct tipc_subscription *sub)
        /* Unlink subscription from subscriber */
        list_del(&sub->subscription_list);
 
-       /* Release subscriber's server port */
-       tipc_port_unlock(server_port);
+       spin_unlock_bh(&subscriber->lock);
 
        /* Notify subscriber of timeout */
        subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -181,64 +182,54 @@ static void subscr_timeout(struct tipc_subscription *sub)
        /* Now destroy subscription */
        k_term_timer(&sub->timer);
        kfree(sub);
-       atomic_dec(&topsrv.subscription_count);
+       atomic_dec(&subscription_count);
 }
 
 /**
  * subscr_del - delete a subscription within a subscription list
  *
- * Called with subscriber port locked.
+ * Called with subscriber lock held.
  */
 static void subscr_del(struct tipc_subscription *sub)
 {
        tipc_nametbl_unsubscribe(sub);
        list_del(&sub->subscription_list);
        kfree(sub);
-       atomic_dec(&topsrv.subscription_count);
+       atomic_dec(&subscription_count);
 }
 
 /**
  * subscr_terminate - terminate communication with a subscriber
  *
- * Called with subscriber port locked.  Routine must temporarily release lock
- * to enable subscription timeout routine(s) to finish without deadlocking;
- * the lock is then reclaimed to allow caller to release it upon return.
- * (This should work even in the unlikely event some other thread creates
- * a new object reference in the interim that uses this lock; this routine will
- * simply wait for it to be released, then claim it.)
+ * Note: Must be called from process context since it might sleep.
  */
 static void subscr_terminate(struct tipc_subscriber *subscriber)
 {
-       u32 port_ref;
+       tipc_conn_terminate(&topsrv, subscriber->conid);
+}
+
+static void subscr_release(struct tipc_subscriber *subscriber)
+{
        struct tipc_subscription *sub;
        struct tipc_subscription *sub_temp;
 
-       /* Invalidate subscriber reference */
-       port_ref = subscriber->port_ref;
-       subscriber->port_ref = 0;
-       spin_unlock_bh(subscriber->lock);
+       spin_lock_bh(&subscriber->lock);
 
-       /* Sever connection to subscriber */
-       tipc_shutdown(port_ref);
-       tipc_deleteport(port_ref);
+       /* Invalidate subscriber reference */
+       subscriber->conid = 0;
 
        /* Destroy any existing subscriptions for subscriber */
        list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
                                 subscription_list) {
                if (sub->timeout != TIPC_WAIT_FOREVER) {
+                       spin_unlock_bh(&subscriber->lock);
                        k_cancel_timer(&sub->timer);
                        k_term_timer(&sub->timer);
+                       spin_lock_bh(&subscriber->lock);
                }
                subscr_del(sub);
        }
-
-       /* Remove subscriber from topology server's subscriber list */
-       spin_lock_bh(&topsrv.lock);
-       list_del(&subscriber->subscriber_list);
-       spin_unlock_bh(&topsrv.lock);
-
-       /* Reclaim subscriber lock */
-       spin_lock_bh(subscriber->lock);
+       spin_unlock_bh(&subscriber->lock);
 
        /* Now destroy subscriber */
        kfree(subscriber);
@@ -247,7 +238,7 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
 /**
  * subscr_cancel - handle subscription cancellation request
  *
- * Called with subscriber port locked.  Routine must temporarily release lock
+ * Called with subscriber lock held. Routine must temporarily release lock
  * to enable the subscription timeout routine to finish without deadlocking;
  * the lock is then reclaimed to allow caller to release it upon return.
  *
@@ -274,10 +265,10 @@ static void subscr_cancel(struct tipc_subscr *s,
        /* Cancel subscription timer (if used), then delete subscription */
        if (sub->timeout != TIPC_WAIT_FOREVER) {
                sub->timeout = TIPC_WAIT_FOREVER;
-               spin_unlock_bh(subscriber->lock);
+               spin_unlock_bh(&subscriber->lock);
                k_cancel_timer(&sub->timer);
                k_term_timer(&sub->timer);
-               spin_lock_bh(subscriber->lock);
+               spin_lock_bh(&subscriber->lock);
        }
        subscr_del(sub);
 }
@@ -285,7 +276,7 @@ static void subscr_cancel(struct tipc_subscr *s,
 /**
  * subscr_subscribe - create subscription for subscriber
  *
- * Called with subscriber port locked.
+ * Called with subscriber lock held.
  */
 static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
                                             struct tipc_subscriber *subscriber)
@@ -304,7 +295,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Refuse subscription if global limit exceeded */
-       if (atomic_read(&topsrv.subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+       if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
                        TIPC_MAX_SUBSCRIPTIONS);
                subscr_terminate(subscriber);
@@ -335,10 +326,10 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
        }
        INIT_LIST_HEAD(&sub->nameseq_list);
        list_add(&sub->subscription_list, &subscriber->subscription_list);
-       sub->server_ref = subscriber->port_ref;
+       sub->subscriber = subscriber;
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
-       atomic_inc(&topsrv.subscription_count);
+       atomic_inc(&subscription_count);
        if (sub->timeout != TIPC_WAIT_FOREVER) {
                k_init_timer(&sub->timer,
                             (Handler)subscr_timeout, (unsigned long)sub);
@@ -348,196 +339,51 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
        return sub;
 }
 
-/**
- * subscr_conn_shutdown_event - handle termination request from subscriber
- *
- * Called with subscriber's server port unlocked.
- */
-static void subscr_conn_shutdown_event(void *usr_handle,
-                                      u32 port_ref,
-                                      struct sk_buff **buf,
-                                      unsigned char const *data,
-                                      unsigned int size,
-                                      int reason)
+/* Handle one termination request for the subscriber */
+static void subscr_conn_shutdown_event(int conid, void *usr_data)
 {
-       struct tipc_subscriber *subscriber = usr_handle;
-       spinlock_t *subscriber_lock;
-
-       if (tipc_port_lock(port_ref) == NULL)
-               return;
-
-       subscriber_lock = subscriber->lock;
-       subscr_terminate(subscriber);
-       spin_unlock_bh(subscriber_lock);
+       subscr_release((struct tipc_subscriber *)usr_data);
 }
 
-/**
- * subscr_conn_msg_event - handle new subscription request from subscriber
- *
- * Called with subscriber's server port unlocked.
- */
-static void subscr_conn_msg_event(void *usr_handle,
-                                 u32 port_ref,
-                                 struct sk_buff **buf,
-                                 const unchar *data,
-                                 u32 size)
+/* Handle one request to create a new subscription for the subscriber */
+static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+                                 void *usr_data, void *buf, size_t len)
 {
-       struct tipc_subscriber *subscriber = usr_handle;
-       spinlock_t *subscriber_lock;
+       struct tipc_subscriber *subscriber = usr_data;
        struct tipc_subscription *sub;
 
-       /*
-        * Lock subscriber's server port (& make a local copy of lock pointer,
-        * in case subscriber is deleted while processing subscription request)
-        */
-       if (tipc_port_lock(port_ref) == NULL)
-               return;
-
-       subscriber_lock = subscriber->lock;
-
-       if (size != sizeof(struct tipc_subscr)) {
-               subscr_terminate(subscriber);
-               spin_unlock_bh(subscriber_lock);
-       } else {
-               sub = subscr_subscribe((struct tipc_subscr *)data, subscriber);
-               spin_unlock_bh(subscriber_lock);
-               if (sub != NULL) {
-
-                       /*
-                        * We must release the server port lock before adding a
-                        * subscription to the name table since TIPC needs to be
-                        * able to (re)acquire the port lock if an event message
-                        * issued by the subscription process is rejected and
-                        * returned.  The subscription cannot be deleted while
-                        * it is being added to the name table because:
-                        * a) the single-threading of the native API port code
-                        *    ensures the subscription cannot be cancelled and
-                        *    the subscriber connection cannot be broken, and
-                        * b) the name table lock ensures the subscription
-                        *    timeout code cannot delete the subscription,
-                        * so the subscription object is still protected.
-                        */
-                       tipc_nametbl_subscribe(sub);
-               }
-       }
+       spin_lock_bh(&subscriber->lock);
+       sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
+       if (sub)
+               tipc_nametbl_subscribe(sub);
+       spin_unlock_bh(&subscriber->lock);
 }
 
-/**
- * subscr_named_msg_event - handle request to establish a new subscriber
- */
-static void subscr_named_msg_event(void *usr_handle,
-                                  u32 port_ref,
-                                  struct sk_buff **buf,
-                                  const unchar *data,
-                                  u32 size,
-                                  u32 importance,
-                                  struct tipc_portid const *orig,
-                                  struct tipc_name_seq const *dest)
+
+/* Handle one request to establish a new subscriber */
+static void *subscr_named_msg_event(int conid)
 {
        struct tipc_subscriber *subscriber;
-       u32 server_port_ref;
 
        /* Create subscriber object */
        subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
        if (subscriber == NULL) {
                pr_warn("Subscriber rejected, no memory\n");
-               return;
+               return NULL;
        }
        INIT_LIST_HEAD(&subscriber->subscription_list);
-       INIT_LIST_HEAD(&subscriber->subscriber_list);
-
-       /* Create server port & establish connection to subscriber */
-       tipc_createport(subscriber,
-                       importance,
-                       NULL,
-                       NULL,
-                       subscr_conn_shutdown_event,
-                       NULL,
-                       NULL,
-                       subscr_conn_msg_event,
-                       NULL,
-                       &subscriber->port_ref);
-       if (subscriber->port_ref == 0) {
-               pr_warn("Subscriber rejected, unable to create port\n");
-               kfree(subscriber);
-               return;
-       }
-       tipc_connect(subscriber->port_ref, orig);
-
-       /* Lock server port (& save lock address for future use) */
-       subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
-
-       /* Add subscriber to topology server's subscriber list */
-       spin_lock_bh(&topsrv.lock);
-       list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
-       spin_unlock_bh(&topsrv.lock);
-
-       /* Unlock server port */
-       server_port_ref = subscriber->port_ref;
-       spin_unlock_bh(subscriber->lock);
-
-       /* Send an ACK- to complete connection handshaking */
-       tipc_send(server_port_ref, 0, NULL, 0);
+       subscriber->conid = conid;
+       spin_lock_init(&subscriber->lock);
 
-       /* Handle optional subscription request */
-       if (size != 0) {
-               subscr_conn_msg_event(subscriber, server_port_ref,
-                                     buf, data, size);
-       }
+       return (void *)subscriber;
 }
 
 int tipc_subscr_start(void)
 {
-       struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
-       int res;
-
-       spin_lock_init(&topsrv.lock);
-       INIT_LIST_HEAD(&topsrv.subscriber_list);
-
-       res = tipc_createport(NULL,
-                             TIPC_CRITICAL_IMPORTANCE,
-                             NULL,
-                             NULL,
-                             NULL,
-                             NULL,
-                             subscr_named_msg_event,
-                             NULL,
-                             NULL,
-                             &topsrv.setup_port);
-       if (res)
-               goto failed;
-
-       res = tipc_publish(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
-       if (res) {
-               tipc_deleteport(topsrv.setup_port);
-               topsrv.setup_port = 0;
-               goto failed;
-       }
-
-       return 0;
-
-failed:
-       pr_err("Failed to create subscription service\n");
-       return res;
+       return tipc_server_start(&topsrv);
 }
 
 void tipc_subscr_stop(void)
 {
-       struct tipc_subscriber *subscriber;
-       struct tipc_subscriber *subscriber_temp;
-       spinlock_t *subscriber_lock;
-
-       if (topsrv.setup_port) {
-               tipc_deleteport(topsrv.setup_port);
-               topsrv.setup_port = 0;
-
-               list_for_each_entry_safe(subscriber, subscriber_temp,
-                                        &topsrv.subscriber_list,
-                                        subscriber_list) {
-                       subscriber_lock = subscriber->lock;
-                       spin_lock_bh(subscriber_lock);
-                       subscr_terminate(subscriber);
-                       spin_unlock_bh(subscriber_lock);
-               }
-       }
+       tipc_server_stop(&topsrv);
 }
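
For completeness, a sketch of what the reworked topology server now sees on its SOCK_SEQPACKET connections, written as the classic userspace subscription sequence (structures are from <linux/tipc.h>; the service type 4711 and the trimmed error handling are illustrative): connect to {TIPC_TOP_SRV, TIPC_TOP_SRV}, send one struct tipc_subscr, then read struct tipc_event records as matching names are published or withdrawn.

/* Illustrative userspace client of the topology server -- sketch only. */
#include <linux/tipc.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_tipc topsrv = {
		.family			 = AF_TIPC,
		.addrtype		 = TIPC_ADDR_NAME,
		.addr.name.name.type	 = TIPC_TOP_SRV,
		.addr.name.name.instance = TIPC_TOP_SRV,
	};
	struct tipc_subscr subscr = {
		.seq.type  = 4711,		/* invented service type */
		.seq.lower = 0,
		.seq.upper = ~0,
		.timeout   = TIPC_WAIT_FOREVER,
		.filter    = TIPC_SUB_SERVICE,
	};
	struct tipc_event evt;
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (connect(sd, (struct sockaddr *)&topsrv, sizeof(topsrv)) ||
	    send(sd, &subscr, sizeof(subscr), 0) != sizeof(subscr))
		return 1;

	while (recv(sd, &evt, sizeof(evt), 0) == sizeof(evt))
		printf("event %u for <%u,%u,%u>\n", evt.event,
		       evt.s.seq.type, evt.found_lower, evt.found_upper);

	close(sd);
	return 0;
}
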
index 218d2e07f0cc0508db0f2f892d3b5bb74f0e85a6..393e417bee3f51f8703047cdf6ac6a2a6b8654be 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/subscr.h: Include file for TIPC network topology service
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2012-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #ifndef _TIPC_SUBSCR_H
 #define _TIPC_SUBSCR_H
 
+#include "server.h"
+
 struct tipc_subscription;
+struct tipc_subscriber;
 
 /**
  * struct tipc_subscription - TIPC network topology subscription object
+ * @subscriber: pointer to its subscriber
  * @seq: name sequence associated with subscription
  * @timeout: duration of subscription (in ms)
  * @filter: event filtering to be done for subscription
@@ -52,28 +56,23 @@ struct tipc_subscription;
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+       struct tipc_subscriber *subscriber;
        struct tipc_name_seq seq;
        u32 timeout;
        u32 filter;
        struct timer_list timer;
        struct list_head nameseq_list;
        struct list_head subscription_list;
-       u32 server_ref;
        int swap;
        struct tipc_event evt;
 };
 
-int tipc_subscr_overlap(struct tipc_subscription *sub,
-                       u32 found_lower,
+int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
                        u32 found_upper);
 
-void tipc_subscr_report_overlap(struct tipc_subscription *sub,
-                               u32 found_lower,
-                               u32 found_upper,
-                               u32 event,
-                               u32 port_ref,
-                               u32 node,
-                               int must_report);
+void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+                               u32 found_upper, u32 event, u32 port_ref,
+                               u32 node, int must);
 
 int tipc_subscr_start(void);
 
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
new file mode 100644 (file)
index 0000000..f3fef93
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * net/tipc/sysctl.c: sysctl interface to TIPC subsystem
+ *
+ * Copyright (c) 2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+
+#include <linux/sysctl.h>
+
+static struct ctl_table_header *tipc_ctl_hdr;
+
+static struct ctl_table tipc_table[] = {
+       {
+               .procname       = "tipc_rmem",
+               .data           = &sysctl_tipc_rmem,
+               .maxlen         = sizeof(sysctl_tipc_rmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {}
+};
+
+int tipc_register_sysctl(void)
+{
+       tipc_ctl_hdr = register_net_sysctl(&init_net, "net/tipc", tipc_table);
+       if (tipc_ctl_hdr == NULL)
+               return -ENOMEM;
+       return 0;
+}
+
+void tipc_unregister_sysctl(void)
+{
+       unregister_net_sysctl_table(tipc_ctl_hdr);
+}
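
The table above is registered under "net/tipc", so the entry appears as /proc/sys/net/tipc/tipc_rmem. Like tcp_rmem it holds three integers, conventionally a min/default/max triple; in this patch the default is what tipc_sk_create() assigns to sk->sk_rcvbuf and the maximum is what rcvbuf_limit() applies to connected sockets. A small illustrative reader, not part of the patch:

/* Illustration only: read the tipc_rmem triple from procfs. */
#include <stdio.h>

int main(void)
{
	int rmem[3];
	FILE *f = fopen("/proc/sys/net/tipc/tipc_rmem", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d %d %d", &rmem[0], &rmem[1], &rmem[2]) == 3)
		printf("min=%d default=%d max=%d\n", rmem[0], rmem[1], rmem[2]);
	fclose(f);
	return 0;
}
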
index 8800604c93f459e1db124aa2a9cafb1a7b5cdb8f..b3d515021b74314f6859867f0a3d749450b2cf5d 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <net/af_unix.h>
 
-static ctl_table unix_table[] = {
+static struct ctl_table unix_table[] = {
        {
                .procname       = "max_dgram_qlen",
                .data           = &init_net.unx.sysctl_max_dgram_qlen,
index 3f77f42a3b58d04662e132e52a89b4f88b701177..593071dabd1cc7477169b24bc31979d8590465b6 100644 (file)
@@ -144,18 +144,18 @@ EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
  * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
  * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
  * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash function
- * mods with VSOCK_HASH_SIZE - 1 to ensure this.
+ * mods with VSOCK_HASH_SIZE to ensure this.
  */
 #define VSOCK_HASH_SIZE         251
 #define MAX_PORT_RETRIES        24
 
-#define VSOCK_HASH(addr)        ((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
+#define VSOCK_HASH(addr)        ((addr)->svm_port % VSOCK_HASH_SIZE)
 #define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
 #define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])
 
 /* XXX This can probably be implemented in a better way. */
 #define VSOCK_CONN_HASH(src, dst)                              \
-       (((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
+       (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
 #define vsock_connected_sockets(src, dst)              \
        (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
 #define vsock_connected_sockets_vsk(vsk)                               \
@@ -165,6 +165,18 @@ static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
 static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
 static DEFINE_SPINLOCK(vsock_table_lock);
 
+/* Autobind this socket to the local address if necessary. */
+static int vsock_auto_bind(struct vsock_sock *vsk)
+{
+       struct sock *sk = sk_vsock(vsk);
+       struct sockaddr_vm local_addr;
+
+       if (vsock_addr_bound(&vsk->local_addr))
+               return 0;
+       vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
+       return __vsock_bind(sk, &local_addr);
+}
+
 static void vsock_init_tables(void)
 {
        int i;
@@ -956,15 +968,10 @@ static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        lock_sock(sk);
 
-       if (!vsock_addr_bound(&vsk->local_addr)) {
-               struct sockaddr_vm local_addr;
-
-               vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
-               err = __vsock_bind(sk, &local_addr);
-               if (err != 0)
-                       goto out;
+       err = vsock_auto_bind(vsk);
+       if (err)
+               goto out;
 
-       }
 
        /* If the provided message contains an address, use that.  Otherwise
         * fall back on the socket's remote handle (if it has been connected).
@@ -1038,15 +1045,9 @@ static int vsock_dgram_connect(struct socket *sock,
 
        lock_sock(sk);
 
-       if (!vsock_addr_bound(&vsk->local_addr)) {
-               struct sockaddr_vm local_addr;
-
-               vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
-               err = __vsock_bind(sk, &local_addr);
-               if (err != 0)
-                       goto out;
-
-       }
+       err = vsock_auto_bind(vsk);
+       if (err)
+               goto out;
 
        if (!transport->dgram_allow(remote_addr->svm_cid,
                                    remote_addr->svm_port)) {
@@ -1163,17 +1164,9 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                memcpy(&vsk->remote_addr, remote_addr,
                       sizeof(vsk->remote_addr));
 
-               /* Autobind this socket to the local address if necessary. */
-               if (!vsock_addr_bound(&vsk->local_addr)) {
-                       struct sockaddr_vm local_addr;
-
-                       vsock_addr_init(&local_addr, VMADDR_CID_ANY,
-                                       VMADDR_PORT_ANY);
-                       err = __vsock_bind(sk, &local_addr);
-                       if (err != 0)
-                               goto out;
-
-               }
+               err = vsock_auto_bind(vsk);
+               if (err)
+                       goto out;
 
                sk->sk_state = SS_CONNECTING;
 
index daff75200e256705b3e0e9605cf6e6baf6ac17e3..ffc11df02af22cf90c03df730110ea2a96f033cb 100644 (file)
@@ -625,13 +625,14 @@ static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
 
        /* Attach the packet to the socket's receive queue as an sk_buff. */
        skb = alloc_skb(size, GFP_ATOMIC);
-       if (skb) {
-               /* sk_receive_skb() will do a sock_put(), so hold here. */
-               sock_hold(sk);
-               skb_put(skb, size);
-               memcpy(skb->data, dg, size);
-               sk_receive_skb(sk, skb, 0);
-       }
+       if (!skb)
+               return VMCI_ERROR_NO_MEM;
+
+       /* sk_receive_skb() will do a sock_put(), so hold here. */
+       sock_hold(sk);
+       skb_put(skb, size);
+       memcpy(skb->data, dg, size);
+       sk_receive_skb(sk, skb, 0);
 
        return VMCI_SUCCESS;
 }
@@ -939,10 +940,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
                 * reset to prevent that.
                 */
                vmci_transport_send_reset(sk, pkt);
-               goto out;
+               break;
        }
 
-out:
        release_sock(sk);
        kfree(recv_pkt_info);
        /* Release reference obtained in the stream callback when we fetched
index 672459b9483b48ac19184bc223cd80c6a8b75cf9..4f9f216665e9d4d6753172f3c14d7bbdd3f53154 100644 (file)
@@ -775,10 +775,9 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
 }
 
 static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
-                                        unsigned long state,
-                                        void *ndev)
+                                        unsigned long state, void *ptr)
 {
-       struct net_device *dev = ndev;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev;
        int ret;
index 7dc3343427c198b89342e5014af4412a5f24d2b7..1cc47aca7f05baee3e7ffdf18a4e002b2d62279d 100644 (file)
@@ -1568,8 +1568,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
        rtnl_lock();
        if (!state) {
                state = kzalloc(sizeof(*state), GFP_KERNEL);
-               if (!state)
+               if (!state) {
+                       rtnl_unlock();
                        return -ENOMEM;
+               }
                state->filter_wiphy = -1;
                ret = nl80211_dump_wiphy_parse(skb, cb, state);
                if (ret) {
index 37ca9694aabea14efd6308b59ced2e153ac30ae5..1d964e23853f22881151a6c113b813eb431b8d9c 100644 (file)
@@ -224,7 +224,7 @@ static void x25_kill_by_device(struct net_device *dev)
 static int x25_device_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct x25_neigh *nb;
 
        if (!net_eq(dev_net(dev), &init_net))
index ab2bb42fe094b7390d5135ec6e37b9113ea8219b..88843996f9359ec0ef90bc34dd3806ce3e8cedbf 100644 (file)
@@ -163,6 +163,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                skb->sp->xvec[skb->sp->len++] = x;
 
                spin_lock(&x->lock);
+               if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+                       goto drop_unlock;
+               }
+
                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
                        goto drop_unlock;
index bcfda8921b5bf944d38ac3e087eb1cccfac327fe..eb4a8428864879a1346fbd7895b4e5afaa4d91e3 100644 (file)
@@ -64,6 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
+                       err = -EINVAL;
                        goto error;
                }
 
@@ -88,7 +89,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                err = x->type->output(x, skb);
                if (err == -EINPROGRESS)
-                       goto out_exit;
+                       goto out;
 
 resume:
                if (err) {
@@ -106,15 +107,14 @@ resume:
                x = dst->xfrm;
        } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 
-       err = 0;
+       return 0;
 
-out_exit:
-       return err;
 error:
        spin_unlock_bh(&x->lock);
 error_nolock:
        kfree_skb(skb);
-       goto out_exit;
+out:
+       return err;
 }
 
 int xfrm_output_resume(struct sk_buff *skb, int err)
index 23cea0f74336c72b332e909994e242d216e5e2ae..e52cab3591dd78c373274bb64420f87383775e8c 100644 (file)
@@ -2557,11 +2557,12 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
-static void xfrm_garbage_collect(struct net *net)
+void xfrm_garbage_collect(struct net *net)
 {
        flow_cache_flush();
        __xfrm_garbage_collect(net);
 }
+EXPORT_SYMBOL(xfrm_garbage_collect);
 
 static void xfrm_garbage_collect_deferred(struct net *net)
 {
@@ -2784,7 +2785,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
 
 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        switch (event) {
        case NETDEV_DOWN:
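
The cfg80211, x25 and xfrm hunks above all make the same mechanical change: netdevice notifier callbacks no longer receive a bare struct net_device pointer and instead recover the device through netdev_notifier_info_to_dev(). A minimal sketch of the resulting callback shape (illustrative only; example_netdev_event and example_nb are hypothetical names):

/* Hypothetical notifier -- shows the post-conversion callback shape. */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_DOWN:
                /* react to the device going away */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};
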
index c721b0d9ab8b355ba75bc92f5e7e602d9655bad6..80cd1e55b834260e484d0c7842fc8d827e0803ca 100644 (file)
@@ -44,6 +44,7 @@ static const struct snmp_mib xfrm_mib_list[] = {
        SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
        SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
        SNMP_MIB_ITEM("XfrmOutStateInvalid", LINUX_MIB_XFRMOUTSTATEINVALID),
+       SNMP_MIB_ITEM("XfrmAcquireError", LINUX_MIB_XFRMACQUIREERROR),
        SNMP_MIB_SENTINEL
 };
 
index aa778748c56592728866ce24096bf46447c2d13c..3f565e495ac68cea83e1d52cf7db7e820fe777ad 100644 (file)
@@ -1681,6 +1681,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 out:
        xfrm_pol_put(xp);
+       if (delete && err == 0)
+               xfrm_garbage_collect(net);
        return err;
 }
 
index 51bb3de680b67b78071ae7f9dcc03ae387c52b60..f97869f1f09b1f1d534f5e2b412934a9680f7d98 100644 (file)
@@ -149,7 +149,7 @@ cpp_flags      = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
 
 ld_flags       = $(LDFLAGS) $(ldflags-y)
 
-dtc_cpp_flags  = -Wp,-MD,$(depfile).pre -nostdinc                        \
+dtc_cpp_flags  = -Wp,-MD,$(depfile).pre.tmp -nostdinc                    \
                 -I$(srctree)/arch/$(SRCARCH)/boot/dts                   \
                 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include           \
                 -undef -D__DTS__
@@ -264,14 +264,14 @@ $(obj)/%.dtb.S: $(obj)/%.dtb
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
        $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
-               -i $(srctree)/arch/$(SRCARCH)/boot/dts $(DTC_FLAGS) \
-               -d $(depfile).dtc $(dtc-tmp) ; \
-       cat $(depfile).pre $(depfile).dtc > $(depfile)
+               -i $(dir $<) $(DTC_FLAGS) \
+               -d $(depfile).dtc.tmp $(dtc-tmp) ; \
+       cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
 
 $(obj)/%.dtb: $(src)/%.dts FORCE
        $(call if_changed_dep,dtc)
 
-dtc-tmp = $(subst $(comma),_,$(dot-target).dts)
+dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
 # Bzip2
 # ---------------------------------------------------------------------------
index bb4d3deb6d1c0add7815115a14a574212c0f3d90..a65ecbbdd32a90fbadf50aa9622ce555294cf350 100755 (executable)
@@ -105,7 +105,7 @@ while [ "$1" != "" ] ; do
                ;;
        --refresh)
                ;;
-       --*-after)
+       --*-after|-E|-D|-M)
                checkarg "$1"
                A=$ARG
                checkarg "$2"
index 254d5af889562f5e5987db2de1f45f42249d7723..3b41bfca636cea743e575cf3c7dfcb95967e915e 100644 (file)
@@ -71,7 +71,7 @@ static int pop_input_file(void);
                        push_input_file(name);
                }
 
-<*>^"#"(line)?{WS}+[0-9]+{WS}+{STRING}({WS}+[0-9]+)? {
+<*>^"#"(line)?[ \t]+[0-9]+[ \t]+{STRING}([ \t]+[0-9]+)? {
                        char *line, *tmp, *fn;
                        /* skip text before line # */
                        line = yytext;
index a6c5fcdfc032d6103e329bd0e5d9f6ad96de48f5..2d30f41778b7270b074798b4439952dca28d9c53 100644 (file)
@@ -405,19 +405,19 @@ static yyconst flex_int16_t yy_accept[161] =
 static yyconst flex_int32_t yy_ec[256] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    2,    3,
-        2,    2,    2,    1,    1,    1,    1,    1,    1,    1,
+        4,    4,    4,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    2,    4,    5,    6,    1,    1,    7,    8,    1,
-        1,    9,   10,   10,   11,   10,   12,   13,   14,   15,
-       15,   15,   15,   15,   15,   15,   15,   16,    1,   17,
-       18,   19,   10,   10,   20,   20,   20,   20,   20,   20,
-       21,   21,   21,   21,   21,   22,   21,   21,   21,   21,
-       21,   21,   21,   21,   23,   21,   21,   24,   21,   21,
-        1,   25,   26,    1,   21,    1,   20,   27,   28,   29,
-
-       30,   20,   21,   21,   31,   21,   21,   32,   33,   34,
-       35,   36,   21,   37,   38,   39,   40,   41,   21,   24,
-       42,   21,   43,   44,   45,    1,    1,    1,    1,    1,
+        1,    2,    5,    6,    7,    1,    1,    8,    9,    1,
+        1,   10,   11,   11,   12,   11,   13,   14,   15,   16,
+       16,   16,   16,   16,   16,   16,   16,   17,    1,   18,
+       19,   20,   11,   11,   21,   21,   21,   21,   21,   21,
+       22,   22,   22,   22,   22,   23,   22,   22,   22,   22,
+       22,   22,   22,   22,   24,   22,   22,   25,   22,   22,
+        1,   26,   27,    1,   22,    1,   21,   28,   29,   30,
+
+       31,   21,   22,   22,   32,   22,   22,   33,   34,   35,
+       36,   37,   22,   38,   39,   40,   41,   42,   22,   25,
+       43,   22,   44,   45,   46,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
@@ -434,36 +434,36 @@ static yyconst flex_int32_t yy_ec[256] =
         1,    1,    1,    1,    1
     } ;
 
-static yyconst flex_int32_t yy_meta[46] =
+static yyconst flex_int32_t yy_meta[47] =
     {   0,
-        1,    1,    1,    1,    1,    2,    3,    1,    2,    2,
-        2,    4,    5,    5,    5,    6,    1,    1,    1,    7,
-        8,    8,    8,    8,    1,    1,    7,    7,    7,    7,
-        8,    8,    8,    8,    8,    8,    8,    8,    8,    8,
-        8,    8,    3,    1,    1
+        1,    1,    1,    1,    1,    1,    2,    3,    1,    2,
+        2,    2,    4,    5,    5,    5,    6,    1,    1,    1,
+        7,    8,    8,    8,    8,    1,    1,    7,    7,    7,
+        7,    8,    8,    8,    8,    8,    8,    8,    8,    8,
+        8,    8,    8,    3,    1,    1
     } ;
 
 static yyconst flex_int16_t yy_base[175] =
     {   0,
-        0,  388,  381,   40,   41,  386,   71,  385,   34,   44,
-      390,  395,   60,   62,  371,  112,  111,  111,  111,  104,
-      370,  106,  371,  342,  124,  119,    0,  144,  395,    0,
-      123,    0,  159,  153,  165,  167,  395,  130,  395,  382,
-      395,    0,  372,  122,  395,  157,  374,  379,  350,   21,
-      346,  349,  395,  395,  395,  395,  395,  362,  395,  395,
-      181,  346,  342,  395,  359,    0,  191,  343,  190,  351,
-      350,    0,    0,    0,  173,  362,  177,  367,  357,  329,
-      335,  328,  337,  331,  206,  329,  334,  327,  395,  338,
-      170,  314,  346,  345,  318,  325,  343,  158,  316,  212,
-
-      322,  319,  320,  395,  340,  336,  308,  305,  314,  304,
-      295,  138,  208,  220,  395,  292,  305,  265,  264,  254,
-      201,  222,  285,  275,  273,  270,  236,  235,  225,  115,
-      395,  395,  252,  216,  216,  217,  214,  230,  209,  220,
-      213,  239,  211,  217,  216,  209,  229,  395,  240,  225,
-      206,  169,  395,  395,  116,  106,   99,   54,  395,  395,
-      254,  260,  268,  272,  276,  282,  289,  293,  301,  309,
-      313,  319,  327,  335
+        0,  385,  378,   40,   41,  383,   72,  382,   34,   44,
+      388,  393,   61,  117,  368,  116,  115,  115,  115,   48,
+      367,  107,  368,  339,  127,  120,    0,  147,  393,    0,
+      127,    0,  133,  156,  168,  153,  393,  125,  393,  380,
+      393,    0,  369,  127,  393,  160,  371,  377,  347,   21,
+      343,  346,  393,  393,  393,  393,  393,  359,  393,  393,
+      183,  343,  339,  393,  356,    0,  183,  340,  187,  348,
+      347,    0,    0,    0,  178,  359,  195,  365,  354,  326,
+      332,  325,  334,  328,  204,  326,  331,  324,  393,  335,
+      150,  311,  343,  342,  315,  322,  340,  179,  313,  207,
+
+      319,  316,  317,  393,  337,  333,  305,  302,  311,  301,
+      310,  190,  338,  337,  393,  307,  322,  301,  305,  277,
+      208,  311,  307,  278,  271,  270,  248,  246,  213,  130,
+      393,  393,  263,  235,  207,  221,  218,  229,  213,  213,
+      206,  234,  218,  210,  208,  193,  219,  393,  223,  204,
+      176,  157,  393,  393,  120,  106,   97,  119,  393,  393,
+      245,  251,  259,  263,  267,  273,  280,  284,  292,  300,
+      304,  310,  318,  326
     } ;
 
 static yyconst flex_int16_t yy_def[175] =
@@ -489,108 +489,108 @@ static yyconst flex_int16_t yy_def[175] =
       160,  160,  160,  160
     } ;
 
-static yyconst flex_int16_t yy_nxt[441] =
+static yyconst flex_int16_t yy_nxt[440] =
     {   0,
-       12,   13,   14,   15,   16,   12,   17,   18,   12,   12,
-       12,   19,   12,   12,   12,   12,   20,   21,   22,   23,
-       23,   23,   23,   23,   12,   12,   23,   23,   23,   23,
+       12,   13,   14,   13,   15,   16,   12,   17,   18,   12,
+       12,   12,   19,   12,   12,   12,   12,   20,   21,   22,
+       23,   23,   23,   23,   23,   12,   12,   23,   23,   23,
        23,   23,   23,   23,   23,   23,   23,   23,   23,   23,
-       23,   23,   12,   24,   12,   25,   34,   35,   35,   25,
-       81,   26,   26,   27,   27,   27,   34,   35,   35,   82,
-       28,   36,   36,   36,   36,  159,   29,   28,   28,   28,
-       28,   12,   13,   14,   15,   16,   30,   17,   18,   30,
-       30,   30,   26,   30,   30,   30,   12,   20,   21,   22,
-       31,   31,   31,   31,   31,   32,   12,   31,   31,   31,
+       23,   23,   23,   12,   24,   12,   25,   34,   35,   35,
+       25,   81,   26,   26,   27,   27,   27,   34,   35,   35,
+       82,   28,   36,   36,   36,   53,   54,   29,   28,   28,
+       28,   28,   12,   13,   14,   13,   15,   16,   30,   17,
+       18,   30,   30,   30,   26,   30,   30,   30,   12,   20,
+       21,   22,   31,   31,   31,   31,   31,   32,   12,   31,
 
        31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-       31,   31,   31,   12,   24,   12,   39,   41,   45,   47,
-       53,   54,   48,   56,   57,   61,   61,   47,   66,   45,
-       48,   66,   66,   66,   39,   46,   40,   49,   59,   50,
-      158,   51,  122,   52,  157,   49,   46,   50,  136,   63,
-      137,   52,  156,   43,   40,   62,   65,   65,   65,   59,
-       61,   61,  123,   65,   75,   69,   69,   69,   36,   36,
-       65,   65,   65,   65,   70,   71,   72,   69,   69,   69,
-       45,   46,   61,   61,  109,   77,   70,   71,   93,  110,
-       68,   70,   71,   85,   85,   85,   66,   46,  155,   66,
-
-       66,   66,   69,   69,   69,  122,   59,  100,  100,   61,
-       61,   70,   71,  100,  100,  148,  112,  154,   85,   85,
-       85,   61,   61,  129,  129,  123,  129,  129,  135,  135,
-      135,  142,  142,  148,  143,  149,  153,  135,  135,  135,
-      142,  142,  160,  143,  152,  151,  150,  146,  145,  144,
-      141,  140,  139,  149,   38,   38,   38,   38,   38,   38,
-       38,   38,   42,  138,  134,  133,   42,   42,   44,   44,
-       44,   44,   44,   44,   44,   44,   58,   58,   58,   58,
-       64,  132,   64,   66,  131,  130,   66,  160,   66,   66,
-       67,  128,  127,   67,   67,   67,   67,   73,  126,   73,
-
-       73,   76,   76,   76,   76,   76,   76,   76,   76,   78,
-       78,   78,   78,   78,   78,   78,   78,   91,  125,   91,
-       92,  124,   92,   92,  120,   92,   92,  121,  121,  121,
-      121,  121,  121,  121,  121,  147,  147,  147,  147,  147,
-      147,  147,  147,  119,  118,  117,  116,  115,   47,  114,
-      110,  113,  111,  108,  107,  106,   48,  105,  104,   89,
-      103,  102,  101,   99,   98,   97,   96,   95,   94,   79,
-       77,   90,   89,   88,   59,   87,   86,   59,   84,   83,
-       80,   79,   77,   74,  160,   60,   59,   55,   37,  160,
-       33,   25,   26,   25,   11,  160,  160,  160,  160,  160,
+       31,   31,   31,   31,   31,   12,   24,   12,   36,   36,
+       36,   39,   41,   45,   47,   56,   57,   48,   61,   47,
+       39,  159,   48,   66,   61,   45,   66,   66,   66,  158,
+       46,   40,   49,   59,   50,  157,   51,   49,   52,   50,
+       40,   63,   46,   52,   36,   36,   36,  156,   43,   62,
+       65,   65,   65,   59,  136,   68,  137,   65,   75,   69,
+       69,   69,   70,   71,   65,   65,   65,   65,   70,   71,
+       72,   69,   69,   69,   61,   46,   45,  155,  154,   66,
+       70,   71,   66,   66,   66,  122,   85,   85,   85,   59,
+
+       69,   69,   69,   46,   77,  100,  109,   93,  100,   70,
+       71,  110,  112,  122,  129,  123,  153,   85,   85,   85,
+      135,  135,  135,  148,  148,  160,  135,  135,  135,  152,
+      142,  142,  142,  123,  143,  142,  142,  142,  151,  143,
+      150,  146,  145,  149,  149,   38,   38,   38,   38,   38,
+       38,   38,   38,   42,  144,  141,  140,   42,   42,   44,
+       44,   44,   44,   44,   44,   44,   44,   58,   58,   58,
+       58,   64,  139,   64,   66,  138,  134,   66,  133,   66,
+       66,   67,  132,  131,   67,   67,   67,   67,   73,  130,
+       73,   73,   76,   76,   76,   76,   76,   76,   76,   76,
+
+       78,   78,   78,   78,   78,   78,   78,   78,   91,  160,
+       91,   92,  129,   92,   92,  128,   92,   92,  121,  121,
+      121,  121,  121,  121,  121,  121,  147,  147,  147,  147,
+      147,  147,  147,  147,  127,  126,  125,  124,   61,   61,
+      120,  119,  118,  117,  116,  115,   47,  114,  110,  113,
+      111,  108,  107,  106,   48,  105,  104,   89,  103,  102,
+      101,   99,   98,   97,   96,   95,   94,   79,   77,   90,
+       89,   88,   59,   87,   86,   59,   84,   83,   80,   79,
+       77,   74,  160,   60,   59,   55,   37,  160,   33,   25,
+       26,   25,   11,  160,  160,  160,  160,  160,  160,  160,
 
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160
+      160,  160,  160,  160,  160,  160,  160,  160,  160
     } ;
 
-static yyconst flex_int16_t yy_chk[441] =
+static yyconst flex_int16_t yy_chk[440] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    1,    1,    1,    1,    4,    9,    9,    9,   10,
-       50,    4,    5,    5,    5,    5,   10,   10,   10,   50,
-        5,   13,   13,   14,   14,  158,    5,    5,    5,    5,
-        5,    7,    7,    7,    7,    7,    7,    7,    7,    7,
+        1,    1,    1,    1,    1,    1,    4,    9,    9,    9,
+       10,   50,    4,    5,    5,    5,    5,   10,   10,   10,
+       50,    5,   13,   13,   13,   20,   20,    5,    5,    5,
+        5,    5,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
 
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
-        7,    7,    7,    7,    7,    7,   16,   17,   18,   19,
-       20,   20,   19,   22,   22,   25,   25,   26,   31,   44,
-       26,   31,   31,   31,   38,   18,   16,   19,   31,   19,
-      157,   19,  112,   19,  156,   26,   44,   26,  130,   26,
-      130,   26,  155,   17,   38,   25,   28,   28,   28,   28,
-       33,   33,  112,   28,   46,   34,   34,   34,   36,   36,
-       28,   28,   28,   28,   34,   34,   34,   35,   35,   35,
-       75,   46,   61,   61,   98,   77,   35,   35,   77,   98,
-       33,   91,   91,   61,   61,   61,   67,   75,  152,   67,
-
-       67,   67,   69,   69,   69,  121,   67,   85,   85,  113,
-      113,   69,   69,  100,  100,  143,  100,  151,   85,   85,
-       85,  114,  114,  122,  122,  121,  129,  129,  135,  135,
-      135,  138,  138,  147,  138,  143,  150,  129,  129,  129,
-      142,  142,  149,  142,  146,  145,  144,  141,  140,  139,
-      137,  136,  134,  147,  161,  161,  161,  161,  161,  161,
-      161,  161,  162,  133,  128,  127,  162,  162,  163,  163,
-      163,  163,  163,  163,  163,  163,  164,  164,  164,  164,
-      165,  126,  165,  166,  125,  124,  166,  123,  166,  166,
-      167,  120,  119,  167,  167,  167,  167,  168,  118,  168,
-
-      168,  169,  169,  169,  169,  169,  169,  169,  169,  170,
-      170,  170,  170,  170,  170,  170,  170,  171,  117,  171,
-      172,  116,  172,  172,  111,  172,  172,  173,  173,  173,
-      173,  173,  173,  173,  173,  174,  174,  174,  174,  174,
-      174,  174,  174,  110,  109,  108,  107,  106,  105,  103,
-      102,  101,   99,   97,   96,   95,   94,   93,   92,   90,
-       88,   87,   86,   84,   83,   82,   81,   80,   79,   78,
-       76,   71,   70,   68,   65,   63,   62,   58,   52,   51,
-       49,   48,   47,   43,   40,   24,   23,   21,   15,   11,
-        8,    6,    3,    2,  160,  160,  160,  160,  160,  160,
+        7,    7,    7,    7,    7,    7,    7,    7,   14,   14,
+       14,   16,   17,   18,   19,   22,   22,   19,   25,   26,
+       38,  158,   26,   31,   33,   44,   31,   31,   31,  157,
+       18,   16,   19,   31,   19,  156,   19,   26,   19,   26,
+       38,   26,   44,   26,   36,   36,   36,  155,   17,   25,
+       28,   28,   28,   28,  130,   33,  130,   28,   46,   34,
+       34,   34,   91,   91,   28,   28,   28,   28,   34,   34,
+       34,   35,   35,   35,   61,   46,   75,  152,  151,   67,
+       35,   35,   67,   67,   67,  112,   61,   61,   61,   67,
+
+       69,   69,   69,   75,   77,   85,   98,   77,  100,   69,
+       69,   98,  100,  121,  129,  112,  150,   85,   85,   85,
+      135,  135,  135,  143,  147,  149,  129,  129,  129,  146,
+      138,  138,  138,  121,  138,  142,  142,  142,  145,  142,
+      144,  141,  140,  143,  147,  161,  161,  161,  161,  161,
+      161,  161,  161,  162,  139,  137,  136,  162,  162,  163,
+      163,  163,  163,  163,  163,  163,  163,  164,  164,  164,
+      164,  165,  134,  165,  166,  133,  128,  166,  127,  166,
+      166,  167,  126,  125,  167,  167,  167,  167,  168,  124,
+      168,  168,  169,  169,  169,  169,  169,  169,  169,  169,
+
+      170,  170,  170,  170,  170,  170,  170,  170,  171,  123,
+      171,  172,  122,  172,  172,  120,  172,  172,  173,  173,
+      173,  173,  173,  173,  173,  173,  174,  174,  174,  174,
+      174,  174,  174,  174,  119,  118,  117,  116,  114,  113,
+      111,  110,  109,  108,  107,  106,  105,  103,  102,  101,
+       99,   97,   96,   95,   94,   93,   92,   90,   88,   87,
+       86,   84,   83,   82,   81,   80,   79,   78,   76,   71,
+       70,   68,   65,   63,   62,   58,   52,   51,   49,   48,
+       47,   43,   40,   24,   23,   21,   15,   11,    8,    6,
+        3,    2,  160,  160,  160,  160,  160,  160,  160,  160,
 
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160
+      160,  160,  160,  160,  160,  160,  160,  160,  160
     } ;
 
 static yy_state_type yy_last_accepting_state;
index 4af55900a15ba22f1c5b37239038e7d8bb4090ed..ee1d8c3042fbb841447f369676e4b45b34fd6ab8 100644 (file)
@@ -1,10 +1,8 @@
+/* A Bison parser, made by GNU Bison 2.5.  */
 
-/* A Bison parser, made by GNU Bison 2.4.1.  */
-
-/* Skeleton implementation for Bison's Yacc-like parsers in C
+/* Bison implementation for Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
-   Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -46,7 +44,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.4.1"
+#define YYBISON_VERSION "2.5"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -67,7 +65,7 @@
 
 /* Copy the first part of user declarations.  */
 
-/* Line 189 of yacc.c  */
+/* Line 268 of yacc.c  */
 #line 21 "dtc-parser.y"
 
 #include <stdio.h>
@@ -88,8 +86,8 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
 static unsigned char eval_char_literal(const char *s);
 
 
-/* Line 189 of yacc.c  */
-#line 93 "dtc-parser.tab.c"
+/* Line 268 of yacc.c  */
+#line 91 "dtc-parser.tab.c"
 
 /* Enabling traces.  */
 #ifndef YYDEBUG
@@ -147,7 +145,7 @@ static unsigned char eval_char_literal(const char *s);
 typedef union YYSTYPE
 {
 
-/* Line 214 of yacc.c  */
+/* Line 293 of yacc.c  */
 #line 40 "dtc-parser.y"
 
        char *propnodename;
@@ -171,8 +169,8 @@ typedef union YYSTYPE
 
 
 
-/* Line 214 of yacc.c  */
-#line 176 "dtc-parser.tab.c"
+/* Line 293 of yacc.c  */
+#line 174 "dtc-parser.tab.c"
 } YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
@@ -183,8 +181,8 @@ typedef union YYSTYPE
 /* Copy the second part of user declarations.  */
 
 
-/* Line 264 of yacc.c  */
-#line 188 "dtc-parser.tab.c"
+/* Line 343 of yacc.c  */
+#line 186 "dtc-parser.tab.c"
 
 #ifdef short
 # undef short
@@ -234,7 +232,7 @@ typedef short int yytype_int16;
 #define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
 
 #ifndef YY_
-# if YYENABLE_NLS
+# if defined YYENABLE_NLS && YYENABLE_NLS
 #  if ENABLE_NLS
 #   include <libintl.h> /* INFRINGES ON USER NAME SPACE */
 #   define YY_(msgid) dgettext ("bison-runtime", msgid)
@@ -287,11 +285,11 @@ YYID (yyi)
 #    define alloca _alloca
 #   else
 #    define YYSTACK_ALLOC alloca
-#    if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 #     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-#     ifndef _STDLIB_H
-#      define _STDLIB_H 1
+#     ifndef EXIT_SUCCESS
+#      define EXIT_SUCCESS 0
 #     endif
 #    endif
 #   endif
@@ -314,24 +312,24 @@ YYID (yyi)
 #  ifndef YYSTACK_ALLOC_MAXIMUM
 #   define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
 #  endif
-#  if (defined __cplusplus && ! defined _STDLIB_H \
+#  if (defined __cplusplus && ! defined EXIT_SUCCESS \
        && ! ((defined YYMALLOC || defined malloc) \
             && (defined YYFREE || defined free)))
 #   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-#   ifndef _STDLIB_H
-#    define _STDLIB_H 1
+#   ifndef EXIT_SUCCESS
+#    define EXIT_SUCCESS 0
 #   endif
 #  endif
 #  ifndef YYMALLOC
 #   define YYMALLOC malloc
-#   if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#   if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
 #   endif
 #  endif
 #  ifndef YYFREE
 #   define YYFREE free
-#   if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#   if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 void free (void *); /* INFRINGES ON USER NAME SPACE */
 #   endif
@@ -360,23 +358,7 @@ union yyalloc
      ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
       + YYSTACK_GAP_MAXIMUM)
 
-/* Copy COUNT objects from FROM to TO.  The source and destination do
-   not overlap.  */
-# ifndef YYCOPY
-#  if defined __GNUC__ && 1 < __GNUC__
-#   define YYCOPY(To, From, Count) \
-      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
-#  else
-#   define YYCOPY(To, From, Count)             \
-      do                                       \
-       {                                       \
-         YYSIZE_T yyi;                         \
-         for (yyi = 0; yyi < (Count); yyi++)   \
-           (To)[yyi] = (From)[yyi];            \
-       }                                       \
-      while (YYID (0))
-#  endif
-# endif
+# define YYCOPY_NEEDED 1
 
 /* Relocate STACK from its old location to the new one.  The
    local variables YYSIZE and YYSTACKSIZE give the old and new number of
@@ -396,6 +378,26 @@ union yyalloc
 
 #endif
 
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from FROM to TO.  The source and destination do
+   not overlap.  */
+# ifndef YYCOPY
+#  if defined __GNUC__ && 1 < __GNUC__
+#   define YYCOPY(To, From, Count) \
+      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+#  else
+#   define YYCOPY(To, From, Count)             \
+      do                                       \
+       {                                       \
+         YYSIZE_T yyi;                         \
+         for (yyi = 0; yyi < (Count); yyi++)   \
+           (To)[yyi] = (From)[yyi];            \
+       }                                       \
+      while (YYID (0))
+#  endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
@@ -571,8 +573,8 @@ static const yytype_uint8 yyr2[] =
        2,     0,     2,     2,     0,     2,     2,     2,     3,     2
 };
 
-/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
-   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
+/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
+   Performed when YYTABLE doesn't specify something else to do.  Zero
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
@@ -633,8 +635,7 @@ static const yytype_int8 yypgoto[] =
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
    positive, shift that token.  If negative, reduce the rule which
-   number is the opposite.  If zero, do what YYDEFACT says.
-   If YYTABLE_NINF, syntax error.  */
+   number is the opposite.  If YYTABLE_NINF, syntax error.  */
 #define YYTABLE_NINF -1
 static const yytype_uint8 yytable[] =
 {
@@ -654,6 +655,12 @@ static const yytype_uint8 yytable[] =
      137,     0,    73,   139
 };
 
+#define yypact_value_is_default(yystate) \
+  ((yystate) == (-78))
+
+#define yytable_value_is_error(yytable_value) \
+  YYID (0)
+
 static const yytype_int16 yycheck[] =
 {
        5,    38,    39,    17,    18,    19,    12,    12,    17,    18,
@@ -705,9 +712,18 @@ static const yytype_uint8 yystos[] =
 
 /* Like YYERROR except do call yyerror.  This remains here temporarily
    to ease the transition to the new meaning of YYERROR, for GCC.
-   Once GCC version 2 has supplanted version 1, this can go.  */
+   Once GCC version 2 has supplanted version 1, this can go.  However,
+   YYFAIL appears to be in use.  Nevertheless, it is formally deprecated
+   in Bison 2.4.2's NEWS entry, where a plan to phase it out is
+   discussed.  */
 
 #define YYFAIL         goto yyerrlab
+#if defined YYFAIL
+  /* This is here to suppress warnings from the GCC cpp's
+     -Wunused-macros.  Normally we don't worry about that warning, but
+     some users do, and we want to make it easy for users to remove
+     YYFAIL uses, which will produce warnings from Bison 2.5.  */
+#endif
 
 #define YYRECOVERING()  (!!yyerrstatus)
 
@@ -717,7 +733,6 @@ do                                                          \
     {                                                          \
       yychar = (Token);                                                \
       yylval = (Value);                                                \
-      yytoken = YYTRANSLATE (yychar);                          \
       YYPOPSTACK (1);                                          \
       goto yybackup;                                           \
     }                                                          \
@@ -759,19 +774,10 @@ while (YYID (0))
 #endif
 
 
-/* YY_LOCATION_PRINT -- Print the location on the stream.
-   This macro was not mandated originally: define only if we know
-   we won't break user code: when these are the locations we know.  */
+/* This macro is provided for backward compatibility. */
 
 #ifndef YY_LOCATION_PRINT
-# if YYLTYPE_IS_TRIVIAL
-#  define YY_LOCATION_PRINT(File, Loc)                 \
-     fprintf (File, "%d.%d-%d.%d",                     \
-             (Loc).first_line, (Loc).first_column,     \
-             (Loc).last_line,  (Loc).last_column)
-# else
-#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-# endif
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
 #endif
 
 
@@ -963,7 +969,6 @@ int yydebug;
 # define YYMAXDEPTH 10000
 #endif
 
-\f
 
 #if YYERROR_VERBOSE
 
@@ -1066,115 +1071,142 @@ yytnamerr (char *yyres, const char *yystr)
 }
 # endif
 
-/* Copy into YYRESULT an error message about the unexpected token
-   YYCHAR while in state YYSTATE.  Return the number of bytes copied,
-   including the terminating null byte.  If YYRESULT is null, do not
-   copy anything; just return the number of bytes that would be
-   copied.  As a special case, return 0 if an ordinary "syntax error"
-   message will do.  Return YYSIZE_MAXIMUM if overflow occurs during
-   size calculation.  */
-static YYSIZE_T
-yysyntax_error (char *yyresult, int yystate, int yychar)
-{
-  int yyn = yypact[yystate];
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+   about the unexpected token YYTOKEN for the state stack whose top is
+   YYSSP.
 
-  if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
-    return 0;
-  else
+   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is
+   not large enough to hold the message.  In that case, also set
+   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the
+   required number of bytes is too large to store.  */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+                yytype_int16 *yyssp, int yytoken)
+{
+  YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
+  YYSIZE_T yysize = yysize0;
+  YYSIZE_T yysize1;
+  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+  /* Internationalized format string. */
+  const char *yyformat = 0;
+  /* Arguments of yyformat. */
+  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+  /* Number of reported tokens (one for the "unexpected", one per
+     "expected"). */
+  int yycount = 0;
+
+  /* There are many possibilities here to consider:
+     - Assume YYFAIL is not used.  It's too flawed to consider.  See
+       <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
+       for details.  YYERROR is fine as it does not invoke this
+       function.
+     - If this state is a consistent state with a default action, then
+       the only way this function was invoked is if the default action
+       is an error action.  In that case, don't check for expected
+       tokens because there are none.
+     - The only way there can be no lookahead present (in yychar) is if
+       this state is a consistent state with a default action.  Thus,
+       detecting the absence of a lookahead is sufficient to determine
+       that there is no unexpected or expected token to report.  In that
+       case, just report a simple "syntax error".
+     - Don't assume there isn't a lookahead just because this state is a
+       consistent state with a default action.  There might have been a
+       previous inconsistent state, consistent state with a non-default
+       action, or user semantic action that manipulated yychar.
+     - Of course, the expected token list depends on states to have
+       correct lookahead information, and it depends on the parser not
+       to perform extra reductions after fetching a lookahead from the
+       scanner and before detecting a syntax error.  Thus, state merging
+       (from LALR or IELR) and default reductions corrupt the expected
+       token list.  However, the list is correct for canonical LR with
+       one exception: it will still contain any token that will not be
+       accepted due to an error action in a later state.
+  */
+  if (yytoken != YYEMPTY)
     {
-      int yytype = YYTRANSLATE (yychar);
-      YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
-      YYSIZE_T yysize = yysize0;
-      YYSIZE_T yysize1;
-      int yysize_overflow = 0;
-      enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
-      char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
-      int yyx;
-
-# if 0
-      /* This is so xgettext sees the translatable formats that are
-        constructed on the fly.  */
-      YY_("syntax error, unexpected %s");
-      YY_("syntax error, unexpected %s, expecting %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s or %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
-# endif
-      char *yyfmt;
-      char const *yyf;
-      static char const yyunexpected[] = "syntax error, unexpected %s";
-      static char const yyexpecting[] = ", expecting %s";
-      static char const yyor[] = " or %s";
-      char yyformat[sizeof yyunexpected
-                   + sizeof yyexpecting - 1
-                   + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
-                      * (sizeof yyor - 1))];
-      char const *yyprefix = yyexpecting;
-
-      /* Start YYX at -YYN if negative to avoid negative indexes in
-        YYCHECK.  */
-      int yyxbegin = yyn < 0 ? -yyn : 0;
-
-      /* Stay within bounds of both yycheck and yytname.  */
-      int yychecklim = YYLAST - yyn + 1;
-      int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
-      int yycount = 1;
-
-      yyarg[0] = yytname[yytype];
-      yyfmt = yystpcpy (yyformat, yyunexpected);
-
-      for (yyx = yyxbegin; yyx < yyxend; ++yyx)
-       if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
-         {
-           if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
-             {
-               yycount = 1;
-               yysize = yysize0;
-               yyformat[sizeof yyunexpected - 1] = '\0';
-               break;
-             }
-           yyarg[yycount++] = yytname[yyx];
-           yysize1 = yysize + yytnamerr (0, yytname[yyx]);
-           yysize_overflow |= (yysize1 < yysize);
-           yysize = yysize1;
-           yyfmt = yystpcpy (yyfmt, yyprefix);
-           yyprefix = yyor;
-         }
+      int yyn = yypact[*yyssp];
+      yyarg[yycount++] = yytname[yytoken];
+      if (!yypact_value_is_default (yyn))
+        {
+          /* Start YYX at -YYN if negative to avoid negative indexes in
+             YYCHECK.  In other words, skip the first -YYN actions for
+             this state because they are default actions.  */
+          int yyxbegin = yyn < 0 ? -yyn : 0;
+          /* Stay within bounds of both yycheck and yytname.  */
+          int yychecklim = YYLAST - yyn + 1;
+          int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+          int yyx;
+
+          for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+            if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+                && !yytable_value_is_error (yytable[yyx + yyn]))
+              {
+                if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+                  {
+                    yycount = 1;
+                    yysize = yysize0;
+                    break;
+                  }
+                yyarg[yycount++] = yytname[yyx];
+                yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+                if (! (yysize <= yysize1
+                       && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+                  return 2;
+                yysize = yysize1;
+              }
+        }
+    }
 
-      yyf = YY_(yyformat);
-      yysize1 = yysize + yystrlen (yyf);
-      yysize_overflow |= (yysize1 < yysize);
-      yysize = yysize1;
+  switch (yycount)
+    {
+# define YYCASE_(N, S)                      \
+      case N:                               \
+        yyformat = S;                       \
+      break
+      YYCASE_(0, YY_("syntax error"));
+      YYCASE_(1, YY_("syntax error, unexpected %s"));
+      YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+      YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+      YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+      YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+    }
 
-      if (yysize_overflow)
-       return YYSIZE_MAXIMUM;
+  yysize1 = yysize + yystrlen (yyformat);
+  if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+    return 2;
+  yysize = yysize1;
 
-      if (yyresult)
-       {
-         /* Avoid sprintf, as that infringes on the user's name space.
-            Don't have undefined behavior even if the translation
-            produced a string with the wrong number of "%s"s.  */
-         char *yyp = yyresult;
-         int yyi = 0;
-         while ((*yyp = *yyf) != '\0')
-           {
-             if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
-               {
-                 yyp += yytnamerr (yyp, yyarg[yyi++]);
-                 yyf += 2;
-               }
-             else
-               {
-                 yyp++;
-                 yyf++;
-               }
-           }
-       }
-      return yysize;
+  if (*yymsg_alloc < yysize)
+    {
+      *yymsg_alloc = 2 * yysize;
+      if (! (yysize <= *yymsg_alloc
+             && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+        *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+      return 1;
     }
+
+  /* Avoid sprintf, as that infringes on the user's name space.
+     Don't have undefined behavior even if the translation
+     produced a string with the wrong number of "%s"s.  */
+  {
+    char *yyp = *yymsg;
+    int yyi = 0;
+    while ((*yyp = *yyformat) != '\0')
+      if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+        {
+          yyp += yytnamerr (yyp, yyarg[yyi++]);
+          yyformat += 2;
+        }
+      else
+        {
+          yyp++;
+          yyformat++;
+        }
+  }
+  return 0;
 }
 #endif /* YYERROR_VERBOSE */
-\f
 
 /*-----------------------------------------------.
 | Release the memory associated to this symbol.  |
@@ -1207,6 +1239,7 @@ yydestruct (yymsg, yytype, yyvaluep)
     }
 }
 
+
 /* Prevent warnings from -Wmissing-prototypes.  */
 #ifdef YYPARSE_PARAM
 #if defined __STDC__ || defined __cplusplus
@@ -1233,10 +1266,9 @@ YYSTYPE yylval;
 int yynerrs;
 
 
-
-/*-------------------------.
-| yyparse or yypush_parse.  |
-`-------------------------*/
+/*----------.
+| yyparse.  |
+`----------*/
 
 #ifdef YYPARSE_PARAM
 #if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1260,8 +1292,6 @@ yyparse ()
 #endif
 #endif
 {
-
-
     int yystate;
     /* Number of tokens to shift before error messages enabled.  */
     int yyerrstatus;
@@ -1416,7 +1446,7 @@ yybackup:
 
   /* First try to decide what to do without reference to lookahead token.  */
   yyn = yypact[yystate];
-  if (yyn == YYPACT_NINF)
+  if (yypact_value_is_default (yyn))
     goto yydefault;
 
   /* Not known => get a lookahead token if don't already have one.  */
@@ -1447,8 +1477,8 @@ yybackup:
   yyn = yytable[yyn];
   if (yyn <= 0)
     {
-      if (yyn == 0 || yyn == YYTABLE_NINF)
-       goto yyerrlab;
+      if (yytable_value_is_error (yyn))
+        goto yyerrlab;
       yyn = -yyn;
       goto yyreduce;
     }
@@ -1503,72 +1533,72 @@ yyreduce:
     {
         case 2:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 110 "dtc-parser.y"
     {
                        the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node),
                                                        guess_boot_cpuid((yyvsp[(4) - (4)].node)));
-               ;}
+               }
     break;
 
   case 3:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 118 "dtc-parser.y"
     {
                        (yyval.re) = NULL;
-               ;}
+               }
     break;
 
   case 4:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 122 "dtc-parser.y"
     {
                        (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
-               ;}
+               }
     break;
 
   case 5:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 129 "dtc-parser.y"
     {
                        (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer));
-               ;}
+               }
     break;
 
   case 6:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 133 "dtc-parser.y"
     {
                        add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref));
                        (yyval.re) = (yyvsp[(2) - (2)].re);
-               ;}
+               }
     break;
 
   case 7:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 141 "dtc-parser.y"
     {
                        (yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
-               ;}
+               }
     break;
 
   case 8:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 145 "dtc-parser.y"
     {
                        (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
-               ;}
+               }
     break;
 
   case 9:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 149 "dtc-parser.y"
     {
                        struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
@@ -1578,12 +1608,12 @@ yyreduce:
                        else
                                print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
                        (yyval.node) = (yyvsp[(1) - (3)].node);
-               ;}
+               }
     break;
 
   case 10:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 159 "dtc-parser.y"
     {
                        struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref));
@@ -1594,112 +1624,112 @@ yyreduce:
                                delete_node(target);
 
                        (yyval.node) = (yyvsp[(1) - (4)].node);
-               ;}
+               }
     break;
 
   case 11:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 173 "dtc-parser.y"
     {
                        (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
-               ;}
+               }
     break;
 
   case 12:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 180 "dtc-parser.y"
     {
                        (yyval.proplist) = NULL;
-               ;}
+               }
     break;
 
   case 13:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 184 "dtc-parser.y"
     {
                        (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
-               ;}
+               }
     break;
 
   case 14:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 191 "dtc-parser.y"
     {
                        (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data));
-               ;}
+               }
     break;
 
   case 15:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 195 "dtc-parser.y"
     {
                        (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data);
-               ;}
+               }
     break;
 
   case 16:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 199 "dtc-parser.y"
     {
                        (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename));
-               ;}
+               }
     break;
 
   case 17:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 203 "dtc-parser.y"
     {
                        add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
                        (yyval.prop) = (yyvsp[(2) - (2)].prop);
-               ;}
+               }
     break;
 
   case 18:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 211 "dtc-parser.y"
     {
                        (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
-               ;}
+               }
     break;
 
   case 19:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 215 "dtc-parser.y"
     {
                        (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data);
-               ;}
+               }
     break;
 
   case 20:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 219 "dtc-parser.y"
     {
                        (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
-               ;}
+               }
     break;
 
   case 21:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 223 "dtc-parser.y"
     {
                        (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
-               ;}
+               }
     break;
 
   case 22:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 227 "dtc-parser.y"
     {
                        FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL);
@@ -1716,12 +1746,12 @@ yyreduce:
 
                        (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d);
                        fclose(f);
-               ;}
+               }
     break;
 
   case 23:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 244 "dtc-parser.y"
     {
                        FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL);
@@ -1731,48 +1761,48 @@ yyreduce:
 
                        (yyval.data) = data_merge((yyvsp[(1) - (5)].data), d);
                        fclose(f);
-               ;}
+               }
     break;
 
   case 24:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 254 "dtc-parser.y"
     {
                        (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-               ;}
+               }
     break;
 
   case 25:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 261 "dtc-parser.y"
     {
                        (yyval.data) = empty_data;
-               ;}
+               }
     break;
 
   case 26:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 265 "dtc-parser.y"
     {
                        (yyval.data) = (yyvsp[(1) - (2)].data);
-               ;}
+               }
     break;
 
   case 27:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 269 "dtc-parser.y"
     {
                        (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-               ;}
+               }
     break;
 
   case 28:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 276 "dtc-parser.y"
     {
                        (yyval.array).data = empty_data;
@@ -1787,22 +1817,22 @@ yyreduce:
                                            " are currently supported");
                                (yyval.array).bits = 32;
                        }
-               ;}
+               }
     break;
 
   case 29:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 291 "dtc-parser.y"
     {
                        (yyval.array).data = empty_data;
                        (yyval.array).bits = 32;
-               ;}
+               }
     break;
 
   case 30:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 296 "dtc-parser.y"
     {
                        if ((yyvsp[(1) - (2)].array).bits < 64) {
@@ -1822,12 +1852,12 @@ yyreduce:
                        }
 
                        (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits);
-               ;}
+               }
     break;
 
   case 31:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 316 "dtc-parser.y"
     {
                        uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits);
@@ -1841,288 +1871,299 @@ yyreduce:
                                            "arrays with 32-bit elements.");
 
                        (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits);
-               ;}
+               }
     break;
 
   case 32:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 330 "dtc-parser.y"
     {
                        (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref));
-               ;}
+               }
     break;
 
   case 33:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 337 "dtc-parser.y"
     {
                        (yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
-               ;}
+               }
     break;
 
   case 34:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 341 "dtc-parser.y"
     {
                        (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal));
-               ;}
+               }
     break;
 
   case 35:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 345 "dtc-parser.y"
     {
                        (yyval.integer) = (yyvsp[(2) - (3)].integer);
-               ;}
+               }
     break;
 
   case 38:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 356 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); }
     break;
 
   case 40:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 361 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); }
     break;
 
   case 42:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 366 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); }
     break;
 
   case 44:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 371 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); }
     break;
 
   case 46:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 376 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); }
     break;
 
   case 48:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 381 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); }
     break;
 
   case 50:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 386 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); }
     break;
 
   case 51:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 387 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); }
     break;
 
   case 53:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 392 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); }
     break;
 
   case 54:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 393 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); }
     break;
 
   case 55:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 394 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); }
     break;
 
   case 56:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 395 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); }
     break;
 
   case 57:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 399 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); }
     break;
 
   case 58:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 400 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); }
     break;
 
   case 60:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 405 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); }
     break;
 
   case 61:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 406 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); }
     break;
 
   case 63:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 411 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); }
     break;
 
   case 64:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 412 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); }
     break;
 
   case 65:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 413 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); }
     break;
 
   case 68:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 419 "dtc-parser.y"
-    { (yyval.integer) = -(yyvsp[(2) - (2)].integer); ;}
+    { (yyval.integer) = -(yyvsp[(2) - (2)].integer); }
     break;
 
   case 69:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 420 "dtc-parser.y"
-    { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); ;}
+    { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); }
     break;
 
   case 70:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 421 "dtc-parser.y"
-    { (yyval.integer) = !(yyvsp[(2) - (2)].integer); ;}
+    { (yyval.integer) = !(yyvsp[(2) - (2)].integer); }
     break;
 
   case 71:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 426 "dtc-parser.y"
     {
                        (yyval.data) = empty_data;
-               ;}
+               }
     break;
 
   case 72:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 430 "dtc-parser.y"
     {
                        (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
-               ;}
+               }
     break;
 
   case 73:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 434 "dtc-parser.y"
     {
                        (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-               ;}
+               }
     break;
 
   case 74:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 441 "dtc-parser.y"
     {
                        (yyval.nodelist) = NULL;
-               ;}
+               }
     break;
 
   case 75:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 445 "dtc-parser.y"
     {
                        (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
-               ;}
+               }
     break;
 
   case 76:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 449 "dtc-parser.y"
     {
                        print_error("syntax error: properties must precede subnodes");
                        YYERROR;
-               ;}
+               }
     break;
 
   case 77:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 457 "dtc-parser.y"
     {
                        (yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename));
-               ;}
+               }
     break;
 
   case 78:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 461 "dtc-parser.y"
     {
                        (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename));
-               ;}
+               }
     break;
 
   case 79:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 465 "dtc-parser.y"
     {
                        add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref));
                        (yyval.node) = (yyvsp[(2) - (2)].node);
-               ;}
+               }
     break;
 
 
 
-/* Line 1455 of yacc.c  */
-#line 2124 "dtc-parser.tab.c"
+/* Line 1806 of yacc.c  */
+#line 2154 "dtc-parser.tab.c"
       default: break;
     }
+  /* User semantic actions sometimes alter yychar, and that requires
+     that yytoken be updated with the new translation.  We take the
+     approach of translating immediately before every use of yytoken.
+     One alternative is translating here after every semantic action,
+     but that translation would be missed if the semantic action invokes
+     YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+     if it invokes YYBACKUP.  In the case of YYABORT or YYACCEPT, an
+     incorrect destructor might then be invoked immediately.  In the
+     case of YYERROR or YYBACKUP, subsequent parser actions might lead
+     to an incorrect destructor call or verbose syntax error message
+     before the lookahead is translated.  */
   YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
 
   YYPOPSTACK (yylen);
@@ -2150,6 +2191,10 @@ yyreduce:
 | yyerrlab -- here on detecting error |
 `------------------------------------*/
 yyerrlab:
+  /* Make sure we have latest lookahead translation.  See comments at
+     user semantic actions for why this is necessary.  */
+  yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
   /* If not already recovering from an error, report this error.  */
   if (!yyerrstatus)
     {
@@ -2157,37 +2202,36 @@ yyerrlab:
 #if ! YYERROR_VERBOSE
       yyerror (YY_("syntax error"));
 #else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+                                        yyssp, yytoken)
       {
-       YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
-       if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
-         {
-           YYSIZE_T yyalloc = 2 * yysize;
-           if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
-             yyalloc = YYSTACK_ALLOC_MAXIMUM;
-           if (yymsg != yymsgbuf)
-             YYSTACK_FREE (yymsg);
-           yymsg = (char *) YYSTACK_ALLOC (yyalloc);
-           if (yymsg)
-             yymsg_alloc = yyalloc;
-           else
-             {
-               yymsg = yymsgbuf;
-               yymsg_alloc = sizeof yymsgbuf;
-             }
-         }
-
-       if (0 < yysize && yysize <= yymsg_alloc)
-         {
-           (void) yysyntax_error (yymsg, yystate, yychar);
-           yyerror (yymsg);
-         }
-       else
-         {
-           yyerror (YY_("syntax error"));
-           if (yysize != 0)
-             goto yyexhaustedlab;
-         }
+        char const *yymsgp = YY_("syntax error");
+        int yysyntax_error_status;
+        yysyntax_error_status = YYSYNTAX_ERROR;
+        if (yysyntax_error_status == 0)
+          yymsgp = yymsg;
+        else if (yysyntax_error_status == 1)
+          {
+            if (yymsg != yymsgbuf)
+              YYSTACK_FREE (yymsg);
+            yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+            if (!yymsg)
+              {
+                yymsg = yymsgbuf;
+                yymsg_alloc = sizeof yymsgbuf;
+                yysyntax_error_status = 2;
+              }
+            else
+              {
+                yysyntax_error_status = YYSYNTAX_ERROR;
+                yymsgp = yymsg;
+              }
+          }
+        yyerror (yymsgp);
+        if (yysyntax_error_status == 2)
+          goto yyexhaustedlab;
       }
+# undef YYSYNTAX_ERROR
 #endif
     }
 
@@ -2246,7 +2290,7 @@ yyerrlab1:
   for (;;)
     {
       yyn = yypact[yystate];
-      if (yyn != YYPACT_NINF)
+      if (!yypact_value_is_default (yyn))
        {
          yyn += YYTERROR;
          if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
@@ -2305,8 +2349,13 @@ yyexhaustedlab:
 
 yyreturn:
   if (yychar != YYEMPTY)
-     yydestruct ("Cleanup: discarding lookahead",
-                yytoken, &yylval);
+    {
+      /* Make sure we have latest lookahead translation.  See comments at
+         user semantic actions for why this is necessary.  */
+      yytoken = YYTRANSLATE (yychar);
+      yydestruct ("Cleanup: discarding lookahead",
+                  yytoken, &yylval);
+    }
   /* Do not reclaim the symbols of the rule which action triggered
      this YYABORT or YYACCEPT.  */
   YYPOPSTACK (yylen);
@@ -2331,7 +2380,7 @@ yyreturn:
 
 
 
-/* Line 1675 of yacc.c  */
+/* Line 2067 of yacc.c  */
 #line 471 "dtc-parser.y"
 
 
index 9d2dce41211f05774ee7c26425fd3c5e1b3e3609..25d3b88c61320bb2525341a66ec2fc556a2fc800 100644 (file)
@@ -1,10 +1,8 @@
+/* A Bison parser, made by GNU Bison 2.5.  */
 
-/* A Bison parser, made by GNU Bison 2.4.1.  */
-
-/* Skeleton interface for Bison's Yacc-like parsers in C
+/* Bison interface for Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
-   Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -70,7 +68,7 @@
 typedef union YYSTYPE
 {
 
-/* Line 1676 of yacc.c  */
+/* Line 2068 of yacc.c  */
 #line 40 "dtc-parser.y"
 
        char *propnodename;
@@ -94,8 +92,8 @@ typedef union YYSTYPE
 
 
 
-/* Line 1676 of yacc.c  */
-#line 99 "dtc-parser.tab.h"
+/* Line 2068 of yacc.c  */
+#line 97 "dtc-parser.tab.h"
 } YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
index 48d382e7e3746bebd850114d4f7557dbc37c49db..38cd69c5660e5163bfc402617de0d702e3372bd6 100644 (file)
@@ -303,10 +303,11 @@ do_resize:
                                }
                }
 
-               if (i < max_choice ||
-                   key == KEY_UP || key == KEY_DOWN ||
-                   key == '-' || key == '+' ||
-                   key == KEY_PPAGE || key == KEY_NPAGE) {
+               if (item_count() != 0 &&
+                   (i < max_choice ||
+                    key == KEY_UP || key == KEY_DOWN ||
+                    key == '-' || key == '+' ||
+                    key == KEY_PPAGE || key == KEY_NPAGE)) {
                        /* Remove highligt of current item */
                        print_item(scroll + choice, choice, FALSE);
 
index 387dc8daf7b2d43ff96b735f6bd60b4c650f85a8..a69cbd78fb38e62f3f6c3978ef6a3935551aaabc 100644 (file)
@@ -670,11 +670,12 @@ static void conf(struct menu *menu, struct menu *active_menu)
                                  active_menu, &s_scroll);
                if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL)
                        break;
-               if (!item_activate_selected())
-                       continue;
-               if (!item_tag())
-                       continue;
-
+               if (item_count() != 0) {
+                       if (!item_activate_selected())
+                               continue;
+                       if (!item_tag())
+                               continue;
+               }
                submenu = item_data();
                active_menu = item_data();
                if (submenu)
index b5c7d90df9df801dac0ca12d64b609e686e74b62..fd3f0180e08fbafb537e128c9e46641288c68774 100644 (file)
@@ -146,11 +146,24 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
                        struct menu *menu = current_entry;
 
                        while ((menu = menu->parent) != NULL) {
+                               struct expr *dup_expr;
+
                                if (!menu->visibility)
                                        continue;
+                               /*
+                                * Do not add a reference to the
+                                * menu's visibility expression but
+                                * use a copy of it.  Otherwise the
+                                * expression reduction functions
+                                * will modify expressions that have
+                                * multiple references which can
+                                * cause unwanted side effects.
+                                */
+                               dup_expr = expr_copy(menu->visibility);
+
                                prop->visible.expr
                                        = expr_alloc_and(prop->visible.expr,
-                                                        menu->visibility);
+                                                        dup_expr);
                        }
                }
 
index 84a406070f6f2b3f7ca48998bdce1a7d1ad7c3b4..a4f31c900fa6a44ab75d1a4f3916d2bb1075c448 100644 (file)
@@ -63,7 +63,7 @@ binrpm-pkg: FORCE
        mv -f $(objtree)/.tmp_version $(objtree)/.version
 
        $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \
-               $(UTS_MACHINE) -bb $<
+               $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
        rm binkernel.spec
 
 # Deb target
index 47a49d1a6f6adf7b60873fcfb1249b1ee023f5b9..694e9e43855f15862227e6caf726189deec5f0fc 100644 (file)
@@ -264,7 +264,7 @@ static int sel_netif_avc_callback(u32 event)
 static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
                                             unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        if (dev_net(dev) != &init_net)
                return NOTIFY_DONE;
index 8ab2951545170e6e81fb71fc7ee1e8cad5c2dd72..d030818862146732ebe30c8cc3f266d485ef0677 100644 (file)
@@ -316,6 +316,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
 
                memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
                memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
+               atomic_inc(&selinux_xfrm_refcount);
                *new_ctxp = new_ctx;
        }
        return 0;
@@ -326,6 +327,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
  */
 void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
 {
+       atomic_dec(&selinux_xfrm_refcount);
        kfree(ctx);
 }
 
@@ -335,17 +337,13 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
 int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
 {
        const struct task_security_struct *tsec = current_security();
-       int rc = 0;
 
-       if (ctx) {
-               rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
-                                 SECCLASS_ASSOCIATION,
-                                 ASSOCIATION__SETCONTEXT, NULL);
-               if (rc == 0)
-                       atomic_dec(&selinux_xfrm_refcount);
-       }
+       if (!ctx)
+               return 0;
 
-       return rc;
+       return avc_has_perm(tsec->sid, ctx->ctx_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+                           NULL);
 }
 
 /*
@@ -370,8 +368,8 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
  */
 void selinux_xfrm_state_free(struct xfrm_state *x)
 {
-       struct xfrm_sec_ctx *ctx = x->security;
-       kfree(ctx);
+       atomic_dec(&selinux_xfrm_refcount);
+       kfree(x->security);
 }
 
  /*
@@ -381,17 +379,13 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
 {
        const struct task_security_struct *tsec = current_security();
        struct xfrm_sec_ctx *ctx = x->security;
-       int rc = 0;
 
-       if (ctx) {
-               rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
-                                 SECCLASS_ASSOCIATION,
-                                 ASSOCIATION__SETCONTEXT, NULL);
-               if (rc == 0)
-                       atomic_dec(&selinux_xfrm_refcount);
-       }
+       if (!ctx)
+               return 0;
 
-       return rc;
+       return avc_has_perm(tsec->sid, ctx->ctx_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+                           NULL);
 }
 
 /*
index 552b97afbca5346a96ca6187602ba0326ce4ef28..61ab640e195f3cbf6ee30d574116c797eead5a99 100644 (file)
@@ -113,6 +113,7 @@ MODULE_ALIAS("sound-layout-100");
 MODULE_ALIAS("aoa-device-id-14");
 MODULE_ALIAS("aoa-device-id-22");
 MODULE_ALIAS("aoa-device-id-35");
+MODULE_ALIAS("aoa-device-id-44");
 
 /* onyx with all but microphone connected */
 static struct codec_connection onyx_connections_nomic[] = {
@@ -361,6 +362,13 @@ static struct layout layouts[] = {
                .connections = tas_connections_nolineout,
          },
        },
+       /* PowerBook6,5 */
+       { .device_id = 44,
+         .codecs[0] = {
+               .name = "tas",
+               .connections = tas_connections_all,
+         },
+       },
        /* PowerBook6,7 */
        { .layout_id = 80,
          .codecs[0] = {
index 010658335881cf133b918d2159817267c375a522..15e76131b5015dd88cccf93ea08837694b1f03c7 100644 (file)
@@ -200,7 +200,8 @@ static int i2sbus_add_dev(struct macio_dev *macio,
                         * We probably cannot handle all device-id machines,
                         * so restrict to those we do handle for now.
                         */
-                       if (id && (*id == 22 || *id == 14 || *id == 35)) {
+                       if (id && (*id == 22 || *id == 14 || *id == 35 ||
+                                  *id == 44)) {
                                snprintf(dev->sound.modalias, 32,
                                         "aoa-device-id-%d", *id);
                                ok = 1;
index ccfa383f1fda33d5cd8e4d6ed16e42ed875cc5a1..f92818155958d2a166d3a90f1f2ed2670e9217d2 100644 (file)
@@ -1649,6 +1649,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
        }
        if (!snd_pcm_stream_linked(substream)) {
                substream->group = group;
+               group = NULL;
                spin_lock_init(&substream->group->lock);
                INIT_LIST_HEAD(&substream->group->substreams);
                list_add_tail(&substream->link_list, &substream->group->substreams);
@@ -1663,8 +1664,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
  _nolock:
        snd_card_unref(substream1->pcm->card);
        fput_light(file, fput_needed);
-       if (res < 0)
-               kfree(group);
+       kfree(group);
        return res;
 }
 
index 51c4ba95a32d3961375a593d41afe538ab787760..1a9640254433e22ce61264a006fd6d52db0aad97 100644 (file)
@@ -250,7 +250,7 @@ config MSND_FIFOSIZE
 menuconfig SOUND_OSS
        tristate "OSS sound modules"
        depends on ISA_DMA_API && VIRT_TO_BUS
-       depends on !ISA_DMA_SUPPORT_BROKEN
+       depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
        help
          OSS is the Open Sound System suite of sound card drivers.  They make
          sound programming easier since they provide a common API.  Say Y or
index ac079f93c5354ae576eca7745047883e4b0d051c..4b1524a861f38e2aee10d8bd39bf3352d82ed0b8 100644 (file)
@@ -606,6 +606,10 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
        return false;
 }
 
+/* check whether the NID is referred by any active paths */
+#define is_active_nid_for_any(codec, nid) \
+       is_active_nid(codec, nid, HDA_OUTPUT, 0)
+
 /* get the default amp value for the target state */
 static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
                                   int dir, unsigned int caps, bool enable)
@@ -759,7 +763,8 @@ static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
 
        for (i = 0; i < path->depth; i++) {
                hda_nid_t nid = path->path[i];
-               if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) {
+               if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
+                   !is_active_nid_for_any(codec, nid)) {
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE,
                                            AC_PWRST_D3);
@@ -783,6 +788,8 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
                return;
        if (codec->inv_eapd)
                enable = !enable;
+       if (spec->keep_eapd_on && !enable)
+               return;
        snd_hda_codec_update_cache(codec, pin, 0,
                                   AC_VERB_SET_EAPD_BTLENABLE,
                                   enable ? 0x02 : 0x00);
@@ -1933,17 +1940,7 @@ static int create_speaker_out_ctls(struct hda_codec *codec)
  * independent HP controls
  */
 
-/* update HP auto-mute state too */
-static void update_hp_automute_hook(struct hda_codec *codec)
-{
-       struct hda_gen_spec *spec = codec->spec;
-
-       if (spec->hp_automute_hook)
-               spec->hp_automute_hook(codec, NULL);
-       else
-               snd_hda_gen_hp_automute(codec, NULL);
-}
-
+static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack);
 static int indep_hp_info(struct snd_kcontrol *kcontrol,
                         struct snd_ctl_elem_info *uinfo)
 {
@@ -2004,7 +2001,7 @@ static int indep_hp_put(struct snd_kcontrol *kcontrol,
                else
                        *dacp = spec->alt_dac_nid;
 
-               update_hp_automute_hook(codec);
+               call_hp_automute(codec, NULL);
                ret = 1;
        }
  unlock:
@@ -2300,7 +2297,7 @@ static void update_hp_mic(struct hda_codec *codec, int adc_mux, bool force)
                else
                        val = PIN_HP;
                set_pin_target(codec, pin, val, true);
-               update_hp_automute_hook(codec);
+               call_hp_automute(codec, NULL);
        }
 }
 
@@ -2709,7 +2706,7 @@ static int hp_mic_jack_mode_put(struct snd_kcontrol *kcontrol,
                        val = snd_hda_get_default_vref(codec, nid);
        }
        snd_hda_set_pin_ctl_cache(codec, nid, val);
-       update_hp_automute_hook(codec);
+       call_hp_automute(codec, NULL);
 
        return 1;
 }
@@ -3854,20 +3851,42 @@ void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *ja
 }
 EXPORT_SYMBOL_HDA(snd_hda_gen_mic_autoswitch);
 
-/* update jack retasking */
-static void update_automute_all(struct hda_codec *codec)
+/* call appropriate hooks */
+static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
 {
        struct hda_gen_spec *spec = codec->spec;
+       if (spec->hp_automute_hook)
+               spec->hp_automute_hook(codec, jack);
+       else
+               snd_hda_gen_hp_automute(codec, jack);
+}
 
-       update_hp_automute_hook(codec);
+static void call_line_automute(struct hda_codec *codec,
+                              struct hda_jack_tbl *jack)
+{
+       struct hda_gen_spec *spec = codec->spec;
        if (spec->line_automute_hook)
-               spec->line_automute_hook(codec, NULL);
+               spec->line_automute_hook(codec, jack);
        else
-               snd_hda_gen_line_automute(codec, NULL);
+               snd_hda_gen_line_automute(codec, jack);
+}
+
+static void call_mic_autoswitch(struct hda_codec *codec,
+                               struct hda_jack_tbl *jack)
+{
+       struct hda_gen_spec *spec = codec->spec;
        if (spec->mic_autoswitch_hook)
-               spec->mic_autoswitch_hook(codec, NULL);
+               spec->mic_autoswitch_hook(codec, jack);
        else
-               snd_hda_gen_mic_autoswitch(codec, NULL);
+               snd_hda_gen_mic_autoswitch(codec, jack);
+}
+
+/* update jack retasking */
+static void update_automute_all(struct hda_codec *codec)
+{
+       call_hp_automute(codec, NULL);
+       call_line_automute(codec, NULL);
+       call_mic_autoswitch(codec, NULL);
 }
 
 /*
@@ -4004,9 +4023,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
                snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n",
                            nid);
                snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT,
-                                                   spec->hp_automute_hook ?
-                                                   spec->hp_automute_hook :
-                                                   snd_hda_gen_hp_automute);
+                                                   call_hp_automute);
                spec->detect_hp = 1;
        }
 
@@ -4019,9 +4036,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
                                snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid);
                                snd_hda_jack_detect_enable_callback(codec, nid,
                                                                    HDA_GEN_FRONT_EVENT,
-                                                                   spec->line_automute_hook ?
-                                                                   spec->line_automute_hook :
-                                                                   snd_hda_gen_line_automute);
+                                                                   call_line_automute);
                                spec->detect_lo = 1;
                        }
                spec->automute_lo_possible = spec->detect_hp;
@@ -4063,9 +4078,7 @@ static bool auto_mic_check_imux(struct hda_codec *codec)
                snd_hda_jack_detect_enable_callback(codec,
                                                    spec->am_entry[i].pin,
                                                    HDA_GEN_MIC_EVENT,
-                                                   spec->mic_autoswitch_hook ?
-                                                   spec->mic_autoswitch_hook :
-                                                   snd_hda_gen_mic_autoswitch);
+                                                   call_mic_autoswitch);
        return true;
 }
 
@@ -4157,7 +4170,7 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
                return power_state;
        if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
                return power_state;
-       if (is_active_nid(codec, nid, HDA_OUTPUT, 0))
+       if (is_active_nid_for_any(codec, nid))
                return power_state;
        return AC_PWRST_D3;
 }
index 54e66516037980c6ecb36be0b30ca4488151d3a3..76200314ee9566e89cf97fa6ff2821d91de37cea 100644 (file)
@@ -222,6 +222,7 @@ struct hda_gen_spec {
        unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */
        unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */
        unsigned int own_eapd_ctl:1; /* set EAPD by own function */
+       unsigned int keep_eapd_on:1; /* don't turn off EAPD automatically */
        unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
        unsigned int indep_hp:1; /* independent HP supported */
        unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
index 6bf47f7326ad4964fc3b4f3403360df6ef52bfd8..02e22b4458d2ed5222c5985ec8bb2593c4a5c405 100644 (file)
@@ -3482,6 +3482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3492,6 +3493,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
        SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -3529,6 +3532,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
index e0dadcf2030d6995197a47c025b085fb7b70a7e3..e5245544eb52bbecaaf0ff3e60b6f4f25d80d3c3 100644 (file)
@@ -136,6 +136,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
                spec->codec_type = VT1708S;
        spec->no_pin_power_ctl = 1;
        spec->gen.indep_hp = 1;
+       spec->gen.keep_eapd_on = 1;
        spec->gen.pcm_playback_hook = via_playback_pcm_hook;
        return spec;
 }
@@ -231,9 +232,14 @@ static void vt1708_update_hp_work(struct hda_codec *codec)
 
 static void set_widgets_power_state(struct hda_codec *codec)
 {
+#if 0 /* FIXME: the assumed connections don't match always with the
+       * actual routes by the generic parser, so better to disable
+       * the control for safety.
+       */
        struct via_spec *spec = codec->spec;
        if (spec->set_widgets_power_state)
                spec->set_widgets_power_state(codec);
+#endif
 }
 
 static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
@@ -478,7 +484,9 @@ static int via_suspend(struct hda_codec *codec)
                /* Fix pop noise on headphones */
                int i;
                for (i = 0; i < spec->gen.autocfg.hp_outs; i++)
-                       snd_hda_set_pin_ctl(codec, spec->gen.autocfg.hp_pins[i], 0);
+                       snd_hda_codec_write(codec, spec->gen.autocfg.hp_pins[i],
+                                           0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+                                           0x00);
        }
 
        return 0;
index d59abe1682c58f9d394340d48c38e1860f906e89..748e82d4d25715b43e4b6790513941fe200f0191 100644 (file)
@@ -1341,7 +1341,8 @@ static int sis_chip_create(struct snd_card *card,
        if (rc)
                goto error_out;
 
-       if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0) {
+       rc = pci_set_dma_mask(pci, DMA_BIT_MASK(30));
+       if (rc < 0) {
                dev_err(&pci->dev, "architecture does not support 30-bit PCI busmaster DMA");
                goto error_out_enabled;
        }
index 114f69a0c6290b43cb90ab591e02af3f14cb4f9d..306d0bc8455faeb202f4c13d87892355350fa0b3 100644 (file)
 
 /* AB8500_ADSLOTSELX */
 #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD  0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD  0x01
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD  0x02
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD  0x03
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD  0x04
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD  0x05
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD  0x06
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD  0x07
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD   0x08
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD  0x10
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD  0x20
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD  0x30
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD  0x40
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD  0x50
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD  0x60
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD  0x70
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD   0x80
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0xF0
 #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN  0x80
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN        0xF0
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x01
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x02
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x03
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x04
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x05
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x06
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x07
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN  0x08
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN        0x0F
 #define AB8500_ADSLOTSELX_EVEN_SHIFT           0
 #define AB8500_ADSLOTSELX_ODD_SHIFT            4
 
index 0f6f481cec09cd941688ed790ecd31e10e207496..987f728718c52cad7ee2a5f1c5dd8dc2fc6d3f02 100644 (file)
@@ -86,7 +86,7 @@ static const struct reg_default cs42l52_reg_defaults[] = {
        { CS42L52_BEEP_VOL, 0x00 },     /* r1D Beep Volume off Time */
        { CS42L52_BEEP_TONE_CTL, 0x00 },        /* r1E Beep Tone Cfg. */
        { CS42L52_TONE_CTL, 0x00 },     /* r1F Tone Ctl */
-       { CS42L52_MASTERA_VOL, 0x88 },  /* r20 Master A Volume */
+       { CS42L52_MASTERA_VOL, 0x00 },  /* r20 Master A Volume */
        { CS42L52_MASTERB_VOL, 0x00 },  /* r21 Master B Volume */
        { CS42L52_HPA_VOL, 0x00 },      /* r22 Headphone A Volume */
        { CS42L52_HPB_VOL, 0x00 },      /* r23 Headphone B Volume */
@@ -193,6 +193,8 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);
 
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
+static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+
 static const unsigned int limiter_tlv[] = {
        TLV_DB_RANGE_HEAD(2),
        0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -225,7 +227,7 @@ static const char * const mic_bias_level_text[] = {
 };
 
 static const struct soc_enum mic_bias_level_enum =
-       SOC_ENUM_SINGLE(CS42L52_IFACE_CTL1, 0,
+       SOC_ENUM_SINGLE(CS42L52_IFACE_CTL2, 0,
                        ARRAY_SIZE(mic_bias_level_text), mic_bias_level_text);
 
 static const char * const cs42l52_mic_text[] = { "Single", "Differential" };
@@ -260,7 +262,7 @@ static const char * const hp_gain_num_text[] = {
 };
 
 static const struct soc_enum hp_gain_enum =
-       SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 4,
+       SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 5,
                ARRAY_SIZE(hp_gain_num_text), hp_gain_num_text);
 
 static const char * const beep_pitch_text[] = {
@@ -413,7 +415,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
        SOC_ENUM("Headphone Analog Gain", hp_gain_enum),
 
        SOC_DOUBLE_R_SX_TLV("Speaker Volume", CS42L52_SPKA_VOL,
-                             CS42L52_SPKB_VOL, 7, 0x1, 0xff, hl_tlv),
+                             CS42L52_SPKB_VOL, 0, 0x1, 0xff, hl_tlv),
 
        SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
                              CS42L52_PASSTHRUB_VOL, 6, 0x18, 0x90, pga_tlv),
@@ -441,7 +443,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 
        SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume",
                            CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL,
-                               6, 0x7f, 0x19, hl_tlv),
+                               0, 0x7f, 0x19, mix_tlv),
        SOC_DOUBLE_R("PCM Mixer Switch",
                     CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1),
 
index 60985c0590711e728fe2229fe459706d42d1ee57..4277012c471978ef1905a196c2d51cc43c636703 100644 (file)
 #define CS42L52_PB_CTL1_INV_PCMA               (1 << 2)
 #define CS42L52_PB_CTL1_MSTB_MUTE              (1 << 1)
 #define CS42L52_PB_CTL1_MSTA_MUTE              (1 << 0)
-#define CS42L52_PB_CTL1_MUTE_MASK              0xFFFD
+#define CS42L52_PB_CTL1_MUTE_MASK              0x03
 #define CS42L52_PB_CTL1_MUTE                   3
 #define CS42L52_PB_CTL1_UNMUTE                 0
 
index 41230ad1c3e038e8337c053f5948af2c60745eee..4a6f1daf911fc0685f16ea5f54fe774bc58581ac 100644 (file)
@@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec)
                                     DA7213_DMIC_DATA_SEL_SHIFT);
                        break;
                }
-               switch (pdata->dmic_data_sel) {
+               switch (pdata->dmic_samplephase) {
                case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
                case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
-                       dmic_cfg |= (pdata->dmic_data_sel <<
+                       dmic_cfg |= (pdata->dmic_samplephase <<
                                     DA7213_DMIC_SAMPLEPHASE_SHIFT);
                        break;
                }
-               switch (pdata->dmic_data_sel) {
+               switch (pdata->dmic_clk_rate) {
                case DA7213_DMIC_CLK_3_0MHZ:
                case DA7213_DMIC_CLK_1_5MHZ:
-                       dmic_cfg |= (pdata->dmic_data_sel <<
+                       dmic_cfg |= (pdata->dmic_clk_rate <<
                                     DA7213_DMIC_CLK_RATE_SHIFT);
                        break;
                }
index ce0d36412c97d73eaa27c59de88c0ee44ce6066c..8d14a76c7249ade32bdc627fee92a5cefe6eaf91 100644 (file)
@@ -2233,7 +2233,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
        dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
 
        ret = request_threaded_irq(max98090->irq, NULL,
-               max98090_interrupt, IRQF_TRIGGER_FALLING,
+               max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                "max98090_interrupt", codec);
        if (ret < 0) {
                dev_err(codec->dev, "request_irq failed: %d\n",
index 65d09d60b7c634c55284eca35f3dc128db10307e..1514bf845e4b1e6e5e8ab37b7400632d5d60f3ad 100644 (file)
@@ -187,14 +187,14 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
 
                        break;
                }
-
-               if (found)
-                       snd_soc_dapm_sync(widget->dapm);
        }
 
-       ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-
        mutex_unlock(&widget->codec->mutex);
+
+       if (found)
+               snd_soc_dapm_sync(widget->dapm);
+
+       ret = snd_soc_update_bits_locked(widget->codec, reg, val_mask, val);
        return ret;
 }
 
index 8df2b6e1a1a6c7fa74cb6cb0b8f94afced8efc2e..370af0cbcc9a97393c695a55936fb22b6ec6e103 100644 (file)
@@ -667,6 +667,7 @@ static int wm0010_boot(struct snd_soc_codec *codec)
                /* On wm0010 only the CLKCTRL1 value is used */
                pll_rec.clkctrl1 = wm0010->pll_clkctrl1;
 
+               ret = -ENOMEM;
                len = pll_rec.length + 8;
                out = kzalloc(len, GFP_KERNEL);
                if (!out) {
index e895d3939eef0769177a4887f0d27355dffa333f..100fdadda56a7e99f6d98874da5c3a89a923815a 100644 (file)
@@ -1120,7 +1120,8 @@ SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
 ARIZONA_DSP_WIDGETS(DSP1, "DSP1"),
 
 SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
-                      ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5102_aec_loopback_mux),
+                      ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
+                      &wm5102_aec_loopback_mux),
 
 SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
                   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
index 731884e04776289449e0a135905eb883a217cb80..88ad7db52ddeedf5b9934e9afe8f6561e189828f 100644 (file)
@@ -190,7 +190,7 @@ ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
 
 ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
@@ -503,7 +503,8 @@ SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
                 NULL, 0),
 
 SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
-                      ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5110_aec_loopback_mux),
+                      ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
+                      &wm5110_aec_loopback_mux),
 
 SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
                     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
@@ -976,6 +977,8 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
        if (ret != 0)
                return ret;
 
+       arizona_init_spk(codec);
+
        snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
 
        priv->core.arizona->dapm = &codec->dapm;
index 1eb152cb10970d06a09b98995871581105e6fa51..29e95f93d482201a91cd0523c008bb2ca2b01bd3 100644 (file)
@@ -383,6 +383,8 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol,
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        int drc = wm8994_get_drc(kcontrol->id.name);
 
+       if (drc < 0)
+               return drc;
        ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc];
 
        return 0;
@@ -488,6 +490,9 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
 
+       if (block < 0)
+               return block;
+
        ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
 
        return 0;
@@ -1031,7 +1036,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_codec *codec = w->codec;
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-       struct wm8994 *control = codec->control_data;
+       struct wm8994 *control = wm8994->wm8994;
        int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
        int i;
        int dac;
@@ -3831,8 +3836,14 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
                                ret);
                } else if (!(ret & WM1811_JACKDET_LVL)) {
                        dev_dbg(codec->dev, "Ignoring removed jack\n");
-                       return IRQ_HANDLED;
+                       goto out;
                }
+       } else if (!(reg & WM8958_MICD_STS)) {
+               snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+                                   SND_JACK_MECHANICAL | SND_JACK_HEADSET |
+                                   wm8994->btn_mask);
+               wm8994->mic_detecting = true;
+               goto out;
        }
 
        if (wm8994->mic_detecting)
index 56ecfc72f2e9500ebda81214d972b65f291cd518..81490febac6dc108decb890ac318ffbec9c2ab8b 100644 (file)
@@ -631,7 +631,8 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
                                       int word_length)
 {
        u32 fmt;
-       u32 rotate = (word_length / 4) & 0x7;
+       u32 tx_rotate = (word_length / 4) & 0x7;
+       u32 rx_rotate = (32 - word_length) / 4;
        u32 mask = (1ULL << word_length) - 1;
 
        /*
@@ -655,9 +656,9 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
                mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
                                TXSSZ(fmt), TXSSZ(0x0F));
                mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
-                               TXROT(rotate), TXROT(7));
+                               TXROT(tx_rotate), TXROT(7));
                mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
-                               RXROT(rotate), RXROT(7));
+                               RXROT(rx_rotate), RXROT(7));
                mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG,
                                mask);
        }
index 902fab02b8512aab0288788b18781ff032da8798..c6fa03e2114ab985186cb8f86a3ee6f4317dc4bc 100644 (file)
@@ -540,11 +540,6 @@ static int imx_ssi_probe(struct platform_device *pdev)
        clk_prepare_enable(ssi->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENODEV;
-               goto failed_get_resource;
-       }
-
        ssi->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ssi->base)) {
                ret = PTR_ERR(ssi->base);
@@ -633,7 +628,6 @@ failed_pdev_fiq_alloc:
        snd_soc_unregister_component(&pdev->dev);
 failed_register:
        release_mem_region(res->start, resource_size(res));
-failed_get_resource:
        clk_disable_unprepare(ssi->clk);
 failed_clk:
 
index befe68f59285df63b653436a4d2d0dd2d1202ec7..4c9dad3263c5f7b754d108b3bf66a3ebdb6d67f3 100644 (file)
@@ -471,11 +471,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, priv);
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "platform_get_resource failed\n");
-               return -ENXIO;
-       }
-
        priv->io = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(priv->io))
                return PTR_ERR(priv->io);
index 3853f7eb3f2843f35c8f908cf47caab47931952a..06a8000aa07bedd1c47beb401d26e9052512fc54 100644 (file)
@@ -220,8 +220,12 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
                        goto err;
        }
 
-       snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
-                               SND_SOC_DAPM_STREAM_START);
+       if (cstream->direction == SND_COMPRESS_PLAYBACK)
+               snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+                                       SND_SOC_DAPM_STREAM_START);
+       else
+               snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
+                                       SND_SOC_DAPM_STREAM_START);
 
        /* cancel any delayed stream shutdown that is pending */
        rtd->pop_wait = 0;
index a80c883bb8be29eeba512d86e46af833d472c833..c7051c457b75c0f09acdd9cd1e39ef62eda22f0f 100644 (file)
@@ -55,7 +55,8 @@ static int dapm_up_seq[] = {
        [snd_soc_dapm_clock_supply] = 1,
        [snd_soc_dapm_micbias] = 2,
        [snd_soc_dapm_dai_link] = 2,
-       [snd_soc_dapm_dai] = 3,
+       [snd_soc_dapm_dai_in] = 3,
+       [snd_soc_dapm_dai_out] = 3,
        [snd_soc_dapm_aif_in] = 3,
        [snd_soc_dapm_aif_out] = 3,
        [snd_soc_dapm_mic] = 4,
@@ -92,7 +93,8 @@ static int dapm_down_seq[] = {
        [snd_soc_dapm_value_mux] = 9,
        [snd_soc_dapm_aif_in] = 10,
        [snd_soc_dapm_aif_out] = 10,
-       [snd_soc_dapm_dai] = 10,
+       [snd_soc_dapm_dai_in] = 10,
+       [snd_soc_dapm_dai_out] = 10,
        [snd_soc_dapm_dai_link] = 11,
        [snd_soc_dapm_clock_supply] = 12,
        [snd_soc_dapm_regulator_supply] = 12,
@@ -419,7 +421,8 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
        case snd_soc_dapm_clock_supply:
        case snd_soc_dapm_aif_in:
        case snd_soc_dapm_aif_out:
-       case snd_soc_dapm_dai:
+       case snd_soc_dapm_dai_in:
+       case snd_soc_dapm_dai_out:
        case snd_soc_dapm_hp:
        case snd_soc_dapm_mic:
        case snd_soc_dapm_spk:
@@ -820,7 +823,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
        switch (widget->id) {
        case snd_soc_dapm_adc:
        case snd_soc_dapm_aif_out:
-       case snd_soc_dapm_dai:
+       case snd_soc_dapm_dai_out:
                if (widget->active) {
                        widget->outputs = snd_soc_dapm_suspend_check(widget);
                        return widget->outputs;
@@ -916,7 +919,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
        switch (widget->id) {
        case snd_soc_dapm_dac:
        case snd_soc_dapm_aif_in:
-       case snd_soc_dapm_dai:
+       case snd_soc_dapm_dai_in:
                if (widget->active) {
                        widget->inputs = snd_soc_dapm_suspend_check(widget);
                        return widget->inputs;
@@ -1135,16 +1138,6 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
        return out != 0 && in != 0;
 }
 
-static int dapm_dai_check_power(struct snd_soc_dapm_widget *w)
-{
-       DAPM_UPDATE_STAT(w, power_checks);
-
-       if (w->active)
-               return w->active;
-
-       return dapm_generic_check_power(w);
-}
-
 /* Check to see if an ADC has power */
 static int dapm_adc_check_power(struct snd_soc_dapm_widget *w)
 {
@@ -2318,7 +2311,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_clock_supply:
        case snd_soc_dapm_aif_in:
        case snd_soc_dapm_aif_out:
-       case snd_soc_dapm_dai:
+       case snd_soc_dapm_dai_in:
+       case snd_soc_dapm_dai_out:
        case snd_soc_dapm_dai_link:
                list_add(&path->list, &dapm->card->paths);
                list_add(&path->list_sink, &wsink->sources);
@@ -3129,10 +3123,12 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
                break;
        case snd_soc_dapm_adc:
        case snd_soc_dapm_aif_out:
+       case snd_soc_dapm_dai_out:
                w->power_check = dapm_adc_check_power;
                break;
        case snd_soc_dapm_dac:
        case snd_soc_dapm_aif_in:
+       case snd_soc_dapm_dai_in:
                w->power_check = dapm_dac_check_power;
                break;
        case snd_soc_dapm_pga:
@@ -3152,9 +3148,6 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_clock_supply:
                w->power_check = dapm_supply_check_power;
                break;
-       case snd_soc_dapm_dai:
-               w->power_check = dapm_dai_check_power;
-               break;
        default:
                w->power_check = dapm_always_on_check_power;
                break;
@@ -3375,7 +3368,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
        template.reg = SND_SOC_NOPM;
 
        if (dai->driver->playback.stream_name) {
-               template.id = snd_soc_dapm_dai;
+               template.id = snd_soc_dapm_dai_in;
                template.name = dai->driver->playback.stream_name;
                template.sname = dai->driver->playback.stream_name;
 
@@ -3393,7 +3386,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
        }
 
        if (dai->driver->capture.stream_name) {
-               template.id = snd_soc_dapm_dai;
+               template.id = snd_soc_dapm_dai_out;
                template.name = dai->driver->capture.stream_name;
                template.sname = dai->driver->capture.stream_name;
 
@@ -3423,8 +3416,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
 
        /* For each DAI widget... */
        list_for_each_entry(dai_w, &card->widgets, list) {
-               if (dai_w->id != snd_soc_dapm_dai)
+               switch (dai_w->id) {
+               case snd_soc_dapm_dai_in:
+               case snd_soc_dapm_dai_out:
+                       break;
+               default:
                        continue;
+               }
 
                dai = dai_w->priv;
 
@@ -3433,8 +3431,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                        if (w->dapm != dai_w->dapm)
                                continue;
 
-                       if (w->id == snd_soc_dapm_dai)
+                       switch (w->id) {
+                       case snd_soc_dapm_dai_in:
+                       case snd_soc_dapm_dai_out:
                                continue;
+                       default:
+                               break;
+                       }
 
                        if (!w->sname)
                                continue;
index 73bb8eefa4913d2b77b468a5dabe22566c61687b..ccb6be4d658d27e3dd56528e02fdf5a59eecb999 100644 (file)
@@ -928,8 +928,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
        /* Create any new FE <--> BE connections */
        for (i = 0; i < list->num_widgets; i++) {
 
-               if (list->widgets[i]->id != snd_soc_dapm_dai)
+               switch (list->widgets[i]->id) {
+               case snd_soc_dapm_dai_in:
+               case snd_soc_dapm_dai_out:
+                       break;
+               default:
                        continue;
+               }
 
                /* is there a valid BE rtd for this widget */
                be = dpcm_get_be(card, list->widgets[i], stream);
@@ -2011,9 +2016,11 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
                if (cpu_dai->driver->capture.channels_min)
                        capture = 1;
        } else {
-               if (codec_dai->driver->playback.channels_min)
+               if (codec_dai->driver->playback.channels_min &&
+                   cpu_dai->driver->playback.channels_min)
                        playback = 1;
-               if (codec_dai->driver->capture.channels_min)
+               if (codec_dai->driver->capture.channels_min &&
+                   cpu_dai->driver->capture.channels_min)
                        capture = 1;
        }
 
index a1d9b0792a1e5b1b68816ad794a77fb76bae2398..b9defcdeb7ef805af05a6453ce309a2eb64bdb18 100644 (file)
@@ -42,8 +42,8 @@ static const u8 ep_w_max_packet_size[] = {
        0x94, 0x01, 0x5c, 0x02  /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */
 };
 
-static const u8 known_fw_versions[][4] = {
-       { 0x03, 0x01, 0x0b, 0x00 }
+static const u8 known_fw_versions[][2] = {
+       { 0x03, 0x01 }
 };
 
 struct ihex_record {
@@ -343,7 +343,7 @@ static int usb6fire_fw_check(u8 *version)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++)
-               if (!memcmp(version, known_fw_versions + i, 4))
+               if (!memcmp(version, known_fw_versions + i, 2))
                        return 0;
 
        snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %*ph. "
index ca4739c3f65021fdef6d8493fb11225ad2746fb1..e5c7f9f20fddbea288de5287ab6923314986570a 100644 (file)
@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
        case USB_ID(0x046d, 0x0808):
        case USB_ID(0x046d, 0x0809):
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+       case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
        case USB_ID(0x046d, 0x0991):
        /* Most audio usb devices lie about volume resolution.
         * Most Logitech webcams have res = 384.
index 135c7687106303fe53a156decdf434150aaba02e..5f761ab34c0140b1b62673e46fc59659488b8cc7 100644 (file)
@@ -116,21 +116,22 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
 }
 
 static void proc_dump_ep_status(struct snd_usb_substream *subs,
-                               struct snd_usb_endpoint *ep,
+                               struct snd_usb_endpoint *data_ep,
+                               struct snd_usb_endpoint *sync_ep,
                                struct snd_info_buffer *buffer)
 {
-       if (!ep)
+       if (!data_ep)
                return;
-       snd_iprintf(buffer, "    Packet Size = %d\n", ep->curpacksize);
+       snd_iprintf(buffer, "    Packet Size = %d\n", data_ep->curpacksize);
        snd_iprintf(buffer, "    Momentary freq = %u Hz (%#x.%04x)\n",
                    subs->speed == USB_SPEED_FULL
-                   ? get_full_speed_hz(ep->freqm)
-                   : get_high_speed_hz(ep->freqm),
-                   ep->freqm >> 16, ep->freqm & 0xffff);
-       if (ep->freqshift != INT_MIN) {
-               int res = 16 - ep->freqshift;
+                   ? get_full_speed_hz(data_ep->freqm)
+                   : get_high_speed_hz(data_ep->freqm),
+                   data_ep->freqm >> 16, data_ep->freqm & 0xffff);
+       if (sync_ep && data_ep->freqshift != INT_MIN) {
+               int res = 16 - data_ep->freqshift;
                snd_iprintf(buffer, "    Feedback Format = %d.%d\n",
-                           (ep->syncmaxsize > 3 ? 32 : 24) - res, res);
+                           (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res);
        }
 }
 
@@ -140,8 +141,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
                snd_iprintf(buffer, "  Status: Running\n");
                snd_iprintf(buffer, "    Interface = %d\n", subs->interface);
                snd_iprintf(buffer, "    Altset = %d\n", subs->altset_idx);
-               proc_dump_ep_status(subs, subs->data_endpoint, buffer);
-               proc_dump_ep_status(subs, subs->sync_endpoint, buffer);
+               proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer);
        } else {
                snd_iprintf(buffer, "  Status: Stop\n");
        }
index 7f1722f82c89ed293a450dd009d552a3ef3f61cf..8b75bcf136f6d17e91053f93820ae3b7042da027 100644 (file)
        .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
 },
 {
-       USB_DEVICE(0x046d, 0x0990),
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+                      USB_DEVICE_ID_MATCH_INT_CLASS |
+                      USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+       .idVendor = 0x046d,
+       .idProduct = 0x0990,
+       .bInterfaceClass = USB_CLASS_AUDIO,
+       .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
                .vendor_name = "Logitech, Inc.",
                .product_name = "QuickCam Pro 9000",
@@ -1792,7 +1798,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
        USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
                .ifnum = 0,
-               .type = QUIRK_MIDI_STANDARD_INTERFACE
+               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+               .data = & (const struct snd_usb_midi_endpoint_info) {
+                       .out_cables = 0x0007,
+                       .in_cables  = 0x0007
+               }
        }
 },
 {
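
Two changes are visible in this table: the QuickCam Pro 9000 entry is spelled out with match_flags so the quirk binds only to that device's audio-control interface rather than to every interface with the same vendor/product ID, and the 0x0582:0x0108 entry switches to a fixed-endpoint MIDI quirk whose 0x0007 masks set bits 0-2, i.e. three MIDI cables in each direction. A quick standalone check of that bit arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int cables = 0x0007;	/* out_cables/in_cables mask from the quirk */
	int n = 0;

	while (cables) {
		n += cables & 1;	/* each set bit is one MIDI cable */
		cables >>= 1;
	}
	printf("%d cables\n", n);	/* prints "3 cables" */
	return 0;
}
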
index a4ffc95000238cde8f086970f9d23d042ba99c59..b5740599aabd38ee1eebdbc2ce3f66b58a0437a6 100755 (executable)
@@ -15,35 +15,38 @@ kallsyms = []
 
 def get_kallsyms_table():
        global kallsyms
+
        try:
                f = open("/proc/kallsyms", "r")
-               linecount = 0
-               for line in f:
-                       linecount = linecount+1
-               f.seek(0)
        except:
                return
 
-
-       j = 0
        for line in f:
                loc = int(line.split()[0], 16)
                name = line.split()[2]
-               j = j +1
-               if ((j % 100) == 0):
-                       print "\r" + str(j) + "/" + str(linecount),
-               kallsyms.append({ 'loc': loc, 'name' : name})
-
-       print "\r" + str(j) + "/" + str(linecount)
+               kallsyms.append((loc, name))
        kallsyms.sort()
-       return
 
 def get_sym(sloc):
        loc = int(sloc)
-       for i in kallsyms:
-               if (i['loc'] >= loc):
-                       return (i['name'], i['loc']-loc)
-       return (None, 0)
+
+       # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
+       #            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
+       start, end = -1, len(kallsyms)
+       while end != start + 1:
+               pivot = (start + end) // 2
+               if loc < kallsyms[pivot][0]:
+                       end = pivot
+               else:
+                       start = pivot
+
+       # Now (start == -1 or kallsyms[start][0] <= loc)
+       # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
+       if start >= 0:
+               symloc, name = kallsyms[start]
+               return (name, loc - symloc)
+       else:
+               return (None, 0)
 
 def print_drop_table():
        print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
@@ -64,7 +67,7 @@ def trace_end():
 
 # called from perf, when it finds a corresponding event
 def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
-                       skbaddr, protocol, location):
+                  skbaddr, location, protocol):
        slocation = str(location)
        try:
                drop_log[slocation] = drop_log[slocation] + 1
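
The rewritten get_sym() above keeps kallsyms as a sorted list of (address, name) tuples and bisects for the last entry at or below the requested address, instead of scanning the whole table linearly. The same lower-bound search, rendered as a self-contained C sketch over a made-up symbol table:

#include <stdio.h>

struct sym { unsigned long loc; const char *name; };

/* made-up table, already sorted by address */
static const struct sym syms[] = {
	{ 0x1000, "start" },
	{ 0x2000, "middle" },
	{ 0x3000, "end" },
};

/* index of the last entry with loc <= target, or -1 if target is below them all */
static int lookup(unsigned long target)
{
	int start = -1, end = sizeof(syms) / sizeof(syms[0]);

	while (end != start + 1) {
		int pivot = (start + end) / 2;

		if (target < syms[pivot].loc)
			end = pivot;	/* everything from pivot up is too high */
		else
			start = pivot;	/* pivot is still <= target */
	}
	return start;
}

int main(void)
{
	int i = lookup(0x2040);

	if (i >= 0)
		printf("%s+0x%lx\n", syms[i].name, 0x2040 - syms[i].loc);
	return 0;	/* prints "middle+0x40" */
}
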
index 9e9d348711953a44006b7c377a898e98c15b131d..fe702076ca46cc2d3d02bab818446c9d15f8c392 100644 (file)
@@ -2191,7 +2191,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-       output_buffer = calloc(1, (1 + topo.num_cpus) * 128);
+       output_buffer = calloc(1, (1 + topo.num_cpus) * 256);
        outp = output_buffer;
        if (outp == NULL) {
                perror("calloc");
index d4abc59ce1d9e519c1be61628d0433602163dddb..0a63658065f0e9e41285f521665b80b975cfe86c 100644 (file)
@@ -6,7 +6,6 @@ TARGETS += memory-hotplug
 TARGETS += mqueue
 TARGETS += net
 TARGETS += ptrace
-TARGETS += soft-dirty
 TARGETS += vm
 
 all:
diff --git a/tools/testing/selftests/soft-dirty/Makefile b/tools/testing/selftests/soft-dirty/Makefile
deleted file mode 100644 (file)
index a9cdc82..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-CFLAGS += -iquote../../../../include/uapi -Wall
-soft-dirty: soft-dirty.c
-
-all: soft-dirty
-
-clean:
-       rm -f soft-dirty
-
-run_tests: all
-       @./soft-dirty || echo "soft-dirty selftests: [FAIL]"
diff --git a/tools/testing/selftests/soft-dirty/soft-dirty.c b/tools/testing/selftests/soft-dirty/soft-dirty.c
deleted file mode 100644 (file)
index aba4f87..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-
-typedef unsigned long long u64;
-
-#define PME_PRESENT    (1ULL << 63)
-#define PME_SOFT_DIRTY (1Ull << 55)
-
-#define PAGES_TO_TEST  3
-#ifndef PAGE_SIZE
-#define PAGE_SIZE      4096
-#endif
-
-static void get_pagemap2(char *mem, u64 *map)
-{
-       int fd;
-
-       fd = open("/proc/self/pagemap2", O_RDONLY);
-       if (fd < 0) {
-               perror("Can't open pagemap2");
-               exit(1);
-       }
-
-       lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET);
-       read(fd, map, sizeof(u64) * PAGES_TO_TEST);
-       close(fd);
-}
-
-static inline char map_p(u64 map)
-{
-       return map & PME_PRESENT ? 'p' : '-';
-}
-
-static inline char map_sd(u64 map)
-{
-       return map & PME_SOFT_DIRTY ? 'd' : '-';
-}
-
-static int check_pte(int step, int page, u64 *map, u64 want)
-{
-       if ((map[page] & want) != want) {
-               printf("Step %d Page %d has %c%c, want %c%c\n",
-                               step, page,
-                               map_p(map[page]), map_sd(map[page]),
-                               map_p(want), map_sd(want));
-               return 1;
-       }
-
-       return 0;
-}
-
-static void clear_refs(void)
-{
-       int fd;
-       char *v = "4";
-
-       fd = open("/proc/self/clear_refs", O_WRONLY);
-       if (write(fd, v, 3) < 3) {
-               perror("Can't clear soft-dirty bit");
-               exit(1);
-       }
-       close(fd);
-}
-
-int main(void)
-{
-       char *mem, x;
-       u64 map[PAGES_TO_TEST];
-
-       mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE,
-                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
-
-       x = mem[0];
-       mem[2 * PAGE_SIZE] = 'c';
-       get_pagemap2(mem, map);
-
-       if (check_pte(1, 0, map, PME_PRESENT))
-               return 1;
-       if (check_pte(1, 1, map, 0))
-               return 1;
-       if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY))
-               return 1;
-
-       clear_refs();
-       get_pagemap2(mem, map);
-
-       if (check_pte(2, 0, map, PME_PRESENT))
-               return 1;
-       if (check_pte(2, 1, map, 0))
-               return 1;
-       if (check_pte(2, 2, map, PME_PRESENT))
-               return 1;
-
-       mem[0] = 'a';
-       mem[PAGE_SIZE] = 'b';
-       x = mem[2 * PAGE_SIZE];
-       get_pagemap2(mem, map);
-
-       if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY))
-               return 1;
-       if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY))
-               return 1;
-       if (check_pte(3, 2, map, PME_PRESENT))
-               return 1;
-
-       (void)x; /* gcc warn */
-
-       printf("PASS\n");
-       return 0;
-}